##// END OF EJS Templates
localrepo: move new repo requirements into standalone function (API)...
Gregory Szorc -
r28164:ad11edef default
parent child Browse files
Show More
@@ -1,1971 +1,1977
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import inspect
11 import inspect
12 import os
12 import os
13 import random
13 import random
14 import time
14 import time
15 import urllib
15 import urllib
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 wdirrev,
23 wdirrev,
24 )
24 )
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 branchmap,
27 branchmap,
28 bundle2,
28 bundle2,
29 changegroup,
29 changegroup,
30 changelog,
30 changelog,
31 cmdutil,
31 cmdutil,
32 context,
32 context,
33 dirstate,
33 dirstate,
34 encoding,
34 encoding,
35 error,
35 error,
36 exchange,
36 exchange,
37 extensions,
37 extensions,
38 filelog,
38 filelog,
39 hook,
39 hook,
40 lock as lockmod,
40 lock as lockmod,
41 manifest,
41 manifest,
42 match as matchmod,
42 match as matchmod,
43 merge as mergemod,
43 merge as mergemod,
44 namespaces,
44 namespaces,
45 obsolete,
45 obsolete,
46 pathutil,
46 pathutil,
47 peer,
47 peer,
48 phases,
48 phases,
49 pushkey,
49 pushkey,
50 repoview,
50 repoview,
51 revset,
51 revset,
52 scmutil,
52 scmutil,
53 store,
53 store,
54 subrepo,
54 subrepo,
55 tags as tagsmod,
55 tags as tagsmod,
56 transaction,
56 transaction,
57 util,
57 util,
58 )
58 )
59
59
60 release = lockmod.release
60 release = lockmod.release
61 propertycache = util.propertycache
61 propertycache = util.propertycache
62 filecache = scmutil.filecache
62 filecache = scmutil.filecache
63
63
64 class repofilecache(filecache):
64 class repofilecache(filecache):
65 """All filecache usage on repo are done for logic that should be unfiltered
65 """All filecache usage on repo are done for logic that should be unfiltered
66 """
66 """
67
67
68 def __get__(self, repo, type=None):
68 def __get__(self, repo, type=None):
69 return super(repofilecache, self).__get__(repo.unfiltered(), type)
69 return super(repofilecache, self).__get__(repo.unfiltered(), type)
70 def __set__(self, repo, value):
70 def __set__(self, repo, value):
71 return super(repofilecache, self).__set__(repo.unfiltered(), value)
71 return super(repofilecache, self).__set__(repo.unfiltered(), value)
72 def __delete__(self, repo):
72 def __delete__(self, repo):
73 return super(repofilecache, self).__delete__(repo.unfiltered())
73 return super(repofilecache, self).__delete__(repo.unfiltered())
74
74
75 class storecache(repofilecache):
75 class storecache(repofilecache):
76 """filecache for files in the store"""
76 """filecache for files in the store"""
77 def join(self, obj, fname):
77 def join(self, obj, fname):
78 return obj.sjoin(fname)
78 return obj.sjoin(fname)
79
79
80 class unfilteredpropertycache(propertycache):
80 class unfilteredpropertycache(propertycache):
81 """propertycache that apply to unfiltered repo only"""
81 """propertycache that apply to unfiltered repo only"""
82
82
83 def __get__(self, repo, type=None):
83 def __get__(self, repo, type=None):
84 unfi = repo.unfiltered()
84 unfi = repo.unfiltered()
85 if unfi is repo:
85 if unfi is repo:
86 return super(unfilteredpropertycache, self).__get__(unfi)
86 return super(unfilteredpropertycache, self).__get__(unfi)
87 return getattr(unfi, self.name)
87 return getattr(unfi, self.name)
88
88
89 class filteredpropertycache(propertycache):
89 class filteredpropertycache(propertycache):
90 """propertycache that must take filtering in account"""
90 """propertycache that must take filtering in account"""
91
91
92 def cachevalue(self, obj, value):
92 def cachevalue(self, obj, value):
93 object.__setattr__(obj, self.name, value)
93 object.__setattr__(obj, self.name, value)
94
94
95
95
96 def hasunfilteredcache(repo, name):
96 def hasunfilteredcache(repo, name):
97 """check if a repo has an unfilteredpropertycache value for <name>"""
97 """check if a repo has an unfilteredpropertycache value for <name>"""
98 return name in vars(repo.unfiltered())
98 return name in vars(repo.unfiltered())
99
99
100 def unfilteredmethod(orig):
100 def unfilteredmethod(orig):
101 """decorate method that always need to be run on unfiltered version"""
101 """decorate method that always need to be run on unfiltered version"""
102 def wrapper(repo, *args, **kwargs):
102 def wrapper(repo, *args, **kwargs):
103 return orig(repo.unfiltered(), *args, **kwargs)
103 return orig(repo.unfiltered(), *args, **kwargs)
104 return wrapper
104 return wrapper
105
105
106 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
106 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
107 'unbundle'))
107 'unbundle'))
108 legacycaps = moderncaps.union(set(['changegroupsubset']))
108 legacycaps = moderncaps.union(set(['changegroupsubset']))
109
109
110 class localpeer(peer.peerrepository):
110 class localpeer(peer.peerrepository):
111 '''peer for a local repo; reflects only the most recent API'''
111 '''peer for a local repo; reflects only the most recent API'''
112
112
113 def __init__(self, repo, caps=moderncaps):
113 def __init__(self, repo, caps=moderncaps):
114 peer.peerrepository.__init__(self)
114 peer.peerrepository.__init__(self)
115 self._repo = repo.filtered('served')
115 self._repo = repo.filtered('served')
116 self.ui = repo.ui
116 self.ui = repo.ui
117 self._caps = repo._restrictcapabilities(caps)
117 self._caps = repo._restrictcapabilities(caps)
118 self.requirements = repo.requirements
118 self.requirements = repo.requirements
119 self.supportedformats = repo.supportedformats
119 self.supportedformats = repo.supportedformats
120
120
121 def close(self):
121 def close(self):
122 self._repo.close()
122 self._repo.close()
123
123
124 def _capabilities(self):
124 def _capabilities(self):
125 return self._caps
125 return self._caps
126
126
127 def local(self):
127 def local(self):
128 return self._repo
128 return self._repo
129
129
130 def canpush(self):
130 def canpush(self):
131 return True
131 return True
132
132
133 def url(self):
133 def url(self):
134 return self._repo.url()
134 return self._repo.url()
135
135
136 def lookup(self, key):
136 def lookup(self, key):
137 return self._repo.lookup(key)
137 return self._repo.lookup(key)
138
138
139 def branchmap(self):
139 def branchmap(self):
140 return self._repo.branchmap()
140 return self._repo.branchmap()
141
141
142 def heads(self):
142 def heads(self):
143 return self._repo.heads()
143 return self._repo.heads()
144
144
145 def known(self, nodes):
145 def known(self, nodes):
146 return self._repo.known(nodes)
146 return self._repo.known(nodes)
147
147
148 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
148 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
149 **kwargs):
149 **kwargs):
150 cg = exchange.getbundle(self._repo, source, heads=heads,
150 cg = exchange.getbundle(self._repo, source, heads=heads,
151 common=common, bundlecaps=bundlecaps, **kwargs)
151 common=common, bundlecaps=bundlecaps, **kwargs)
152 if bundlecaps is not None and 'HG20' in bundlecaps:
152 if bundlecaps is not None and 'HG20' in bundlecaps:
153 # When requesting a bundle2, getbundle returns a stream to make the
153 # When requesting a bundle2, getbundle returns a stream to make the
154 # wire level function happier. We need to build a proper object
154 # wire level function happier. We need to build a proper object
155 # from it in local peer.
155 # from it in local peer.
156 cg = bundle2.getunbundler(self.ui, cg)
156 cg = bundle2.getunbundler(self.ui, cg)
157 return cg
157 return cg
158
158
159 # TODO We might want to move the next two calls into legacypeer and add
159 # TODO We might want to move the next two calls into legacypeer and add
160 # unbundle instead.
160 # unbundle instead.
161
161
162 def unbundle(self, cg, heads, url):
162 def unbundle(self, cg, heads, url):
163 """apply a bundle on a repo
163 """apply a bundle on a repo
164
164
165 This function handles the repo locking itself."""
165 This function handles the repo locking itself."""
166 try:
166 try:
167 try:
167 try:
168 cg = exchange.readbundle(self.ui, cg, None)
168 cg = exchange.readbundle(self.ui, cg, None)
169 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
169 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
170 if util.safehasattr(ret, 'getchunks'):
170 if util.safehasattr(ret, 'getchunks'):
171 # This is a bundle20 object, turn it into an unbundler.
171 # This is a bundle20 object, turn it into an unbundler.
172 # This little dance should be dropped eventually when the
172 # This little dance should be dropped eventually when the
173 # API is finally improved.
173 # API is finally improved.
174 stream = util.chunkbuffer(ret.getchunks())
174 stream = util.chunkbuffer(ret.getchunks())
175 ret = bundle2.getunbundler(self.ui, stream)
175 ret = bundle2.getunbundler(self.ui, stream)
176 return ret
176 return ret
177 except Exception as exc:
177 except Exception as exc:
178 # If the exception contains output salvaged from a bundle2
178 # If the exception contains output salvaged from a bundle2
179 # reply, we need to make sure it is printed before continuing
179 # reply, we need to make sure it is printed before continuing
180 # to fail. So we build a bundle2 with such output and consume
180 # to fail. So we build a bundle2 with such output and consume
181 # it directly.
181 # it directly.
182 #
182 #
183 # This is not very elegant but allows a "simple" solution for
183 # This is not very elegant but allows a "simple" solution for
184 # issue4594
184 # issue4594
185 output = getattr(exc, '_bundle2salvagedoutput', ())
185 output = getattr(exc, '_bundle2salvagedoutput', ())
186 if output:
186 if output:
187 bundler = bundle2.bundle20(self._repo.ui)
187 bundler = bundle2.bundle20(self._repo.ui)
188 for out in output:
188 for out in output:
189 bundler.addpart(out)
189 bundler.addpart(out)
190 stream = util.chunkbuffer(bundler.getchunks())
190 stream = util.chunkbuffer(bundler.getchunks())
191 b = bundle2.getunbundler(self.ui, stream)
191 b = bundle2.getunbundler(self.ui, stream)
192 bundle2.processbundle(self._repo, b)
192 bundle2.processbundle(self._repo, b)
193 raise
193 raise
194 except error.PushRaced as exc:
194 except error.PushRaced as exc:
195 raise error.ResponseError(_('push failed:'), str(exc))
195 raise error.ResponseError(_('push failed:'), str(exc))
196
196
197 def lock(self):
197 def lock(self):
198 return self._repo.lock()
198 return self._repo.lock()
199
199
200 def addchangegroup(self, cg, source, url):
200 def addchangegroup(self, cg, source, url):
201 return cg.apply(self._repo, source, url)
201 return cg.apply(self._repo, source, url)
202
202
203 def pushkey(self, namespace, key, old, new):
203 def pushkey(self, namespace, key, old, new):
204 return self._repo.pushkey(namespace, key, old, new)
204 return self._repo.pushkey(namespace, key, old, new)
205
205
206 def listkeys(self, namespace):
206 def listkeys(self, namespace):
207 return self._repo.listkeys(namespace)
207 return self._repo.listkeys(namespace)
208
208
209 def debugwireargs(self, one, two, three=None, four=None, five=None):
209 def debugwireargs(self, one, two, three=None, four=None, five=None):
210 '''used to test argument passing over the wire'''
210 '''used to test argument passing over the wire'''
211 return "%s %s %s %s %s" % (one, two, three, four, five)
211 return "%s %s %s %s %s" % (one, two, three, four, five)
212
212
213 class locallegacypeer(localpeer):
213 class locallegacypeer(localpeer):
214 '''peer extension which implements legacy methods too; used for tests with
214 '''peer extension which implements legacy methods too; used for tests with
215 restricted capabilities'''
215 restricted capabilities'''
216
216
217 def __init__(self, repo):
217 def __init__(self, repo):
218 localpeer.__init__(self, repo, caps=legacycaps)
218 localpeer.__init__(self, repo, caps=legacycaps)
219
219
220 def branches(self, nodes):
220 def branches(self, nodes):
221 return self._repo.branches(nodes)
221 return self._repo.branches(nodes)
222
222
223 def between(self, pairs):
223 def between(self, pairs):
224 return self._repo.between(pairs)
224 return self._repo.between(pairs)
225
225
226 def changegroup(self, basenodes, source):
226 def changegroup(self, basenodes, source):
227 return changegroup.changegroup(self._repo, basenodes, source)
227 return changegroup.changegroup(self._repo, basenodes, source)
228
228
229 def changegroupsubset(self, bases, heads, source):
229 def changegroupsubset(self, bases, heads, source):
230 return changegroup.changegroupsubset(self._repo, bases, heads, source)
230 return changegroup.changegroupsubset(self._repo, bases, heads, source)
231
231
232 class localrepository(object):
232 class localrepository(object):
233
233
234 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
234 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
235 'manifestv2'))
235 'manifestv2'))
236 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
236 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
237 'dotencode'))
237 'dotencode'))
238 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
238 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
239 filtername = None
239 filtername = None
240
240
241 # a list of (ui, featureset) functions.
241 # a list of (ui, featureset) functions.
242 # only functions defined in module of enabled extensions are invoked
242 # only functions defined in module of enabled extensions are invoked
243 featuresetupfuncs = set()
243 featuresetupfuncs = set()
244
244
245 def _baserequirements(self, create):
246 return ['revlogv1']
247
248 def __init__(self, baseui, path=None, create=False):
245 def __init__(self, baseui, path=None, create=False):
249 self.requirements = set()
246 self.requirements = set()
250 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
247 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
251 self.wopener = self.wvfs
248 self.wopener = self.wvfs
252 self.root = self.wvfs.base
249 self.root = self.wvfs.base
253 self.path = self.wvfs.join(".hg")
250 self.path = self.wvfs.join(".hg")
254 self.origroot = path
251 self.origroot = path
255 self.auditor = pathutil.pathauditor(self.root, self._checknested)
252 self.auditor = pathutil.pathauditor(self.root, self._checknested)
256 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
253 self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
257 realfs=False)
254 realfs=False)
258 self.vfs = scmutil.vfs(self.path)
255 self.vfs = scmutil.vfs(self.path)
259 self.opener = self.vfs
256 self.opener = self.vfs
260 self.baseui = baseui
257 self.baseui = baseui
261 self.ui = baseui.copy()
258 self.ui = baseui.copy()
262 self.ui.copy = baseui.copy # prevent copying repo configuration
259 self.ui.copy = baseui.copy # prevent copying repo configuration
263 # A list of callback to shape the phase if no data were found.
260 # A list of callback to shape the phase if no data were found.
264 # Callback are in the form: func(repo, roots) --> processed root.
261 # Callback are in the form: func(repo, roots) --> processed root.
265 # This list it to be filled by extension during repo setup
262 # This list it to be filled by extension during repo setup
266 self._phasedefaults = []
263 self._phasedefaults = []
267 try:
264 try:
268 self.ui.readconfig(self.join("hgrc"), self.root)
265 self.ui.readconfig(self.join("hgrc"), self.root)
269 extensions.loadall(self.ui)
266 extensions.loadall(self.ui)
270 except IOError:
267 except IOError:
271 pass
268 pass
272
269
273 if self.featuresetupfuncs:
270 if self.featuresetupfuncs:
274 self.supported = set(self._basesupported) # use private copy
271 self.supported = set(self._basesupported) # use private copy
275 extmods = set(m.__name__ for n, m
272 extmods = set(m.__name__ for n, m
276 in extensions.extensions(self.ui))
273 in extensions.extensions(self.ui))
277 for setupfunc in self.featuresetupfuncs:
274 for setupfunc in self.featuresetupfuncs:
278 if setupfunc.__module__ in extmods:
275 if setupfunc.__module__ in extmods:
279 setupfunc(self.ui, self.supported)
276 setupfunc(self.ui, self.supported)
280 else:
277 else:
281 self.supported = self._basesupported
278 self.supported = self._basesupported
282
279
283 if not self.vfs.isdir():
280 if not self.vfs.isdir():
284 if create:
281 if create:
285 requirements = set(self._baserequirements(create))
282 self.requirements = newreporequirements(self)
286 if self.ui.configbool('format', 'usestore', True):
287 requirements.add("store")
288 if self.ui.configbool('format', 'usefncache', True):
289 requirements.add("fncache")
290 if self.ui.configbool('format', 'dotencode', True):
291 requirements.add('dotencode')
292
293 if scmutil.gdinitconfig(self.ui):
294 requirements.add("generaldelta")
295 if self.ui.configbool('experimental', 'treemanifest', False):
296 requirements.add("treemanifest")
297 if self.ui.configbool('experimental', 'manifestv2', False):
298 requirements.add("manifestv2")
299
300 self.requirements = requirements
301
283
302 if not self.wvfs.exists():
284 if not self.wvfs.exists():
303 self.wvfs.makedirs()
285 self.wvfs.makedirs()
304 self.vfs.makedir(notindexed=True)
286 self.vfs.makedir(notindexed=True)
305
287
306 if 'store' in requirements:
288 if 'store' in self.requirements:
307 self.vfs.mkdir("store")
289 self.vfs.mkdir("store")
308
290
309 # create an invalid changelog
291 # create an invalid changelog
310 self.vfs.append(
292 self.vfs.append(
311 "00changelog.i",
293 "00changelog.i",
312 '\0\0\0\2' # represents revlogv2
294 '\0\0\0\2' # represents revlogv2
313 ' dummy changelog to prevent using the old repo layout'
295 ' dummy changelog to prevent using the old repo layout'
314 )
296 )
315 else:
297 else:
316 raise error.RepoError(_("repository %s not found") % path)
298 raise error.RepoError(_("repository %s not found") % path)
317 elif create:
299 elif create:
318 raise error.RepoError(_("repository %s already exists") % path)
300 raise error.RepoError(_("repository %s already exists") % path)
319 else:
301 else:
320 try:
302 try:
321 self.requirements = scmutil.readrequires(
303 self.requirements = scmutil.readrequires(
322 self.vfs, self.supported)
304 self.vfs, self.supported)
323 except IOError as inst:
305 except IOError as inst:
324 if inst.errno != errno.ENOENT:
306 if inst.errno != errno.ENOENT:
325 raise
307 raise
326
308
327 self.sharedpath = self.path
309 self.sharedpath = self.path
328 try:
310 try:
329 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
311 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
330 realpath=True)
312 realpath=True)
331 s = vfs.base
313 s = vfs.base
332 if not vfs.exists():
314 if not vfs.exists():
333 raise error.RepoError(
315 raise error.RepoError(
334 _('.hg/sharedpath points to nonexistent directory %s') % s)
316 _('.hg/sharedpath points to nonexistent directory %s') % s)
335 self.sharedpath = s
317 self.sharedpath = s
336 except IOError as inst:
318 except IOError as inst:
337 if inst.errno != errno.ENOENT:
319 if inst.errno != errno.ENOENT:
338 raise
320 raise
339
321
340 self.store = store.store(
322 self.store = store.store(
341 self.requirements, self.sharedpath, scmutil.vfs)
323 self.requirements, self.sharedpath, scmutil.vfs)
342 self.spath = self.store.path
324 self.spath = self.store.path
343 self.svfs = self.store.vfs
325 self.svfs = self.store.vfs
344 self.sjoin = self.store.join
326 self.sjoin = self.store.join
345 self.vfs.createmode = self.store.createmode
327 self.vfs.createmode = self.store.createmode
346 self._applyopenerreqs()
328 self._applyopenerreqs()
347 if create:
329 if create:
348 self._writerequirements()
330 self._writerequirements()
349
331
350 self._dirstatevalidatewarned = False
332 self._dirstatevalidatewarned = False
351
333
352 self._branchcaches = {}
334 self._branchcaches = {}
353 self._revbranchcache = None
335 self._revbranchcache = None
354 self.filterpats = {}
336 self.filterpats = {}
355 self._datafilters = {}
337 self._datafilters = {}
356 self._transref = self._lockref = self._wlockref = None
338 self._transref = self._lockref = self._wlockref = None
357
339
358 # A cache for various files under .hg/ that tracks file changes,
340 # A cache for various files under .hg/ that tracks file changes,
359 # (used by the filecache decorator)
341 # (used by the filecache decorator)
360 #
342 #
361 # Maps a property name to its util.filecacheentry
343 # Maps a property name to its util.filecacheentry
362 self._filecache = {}
344 self._filecache = {}
363
345
364 # hold sets of revision to be filtered
346 # hold sets of revision to be filtered
365 # should be cleared when something might have changed the filter value:
347 # should be cleared when something might have changed the filter value:
366 # - new changesets,
348 # - new changesets,
367 # - phase change,
349 # - phase change,
368 # - new obsolescence marker,
350 # - new obsolescence marker,
369 # - working directory parent change,
351 # - working directory parent change,
370 # - bookmark changes
352 # - bookmark changes
371 self.filteredrevcache = {}
353 self.filteredrevcache = {}
372
354
373 # generic mapping between names and nodes
355 # generic mapping between names and nodes
374 self.names = namespaces.namespaces()
356 self.names = namespaces.namespaces()
375
357
376 def close(self):
358 def close(self):
377 self._writecaches()
359 self._writecaches()
378
360
379 def _writecaches(self):
361 def _writecaches(self):
380 if self._revbranchcache:
362 if self._revbranchcache:
381 self._revbranchcache.write()
363 self._revbranchcache.write()
382
364
383 def _restrictcapabilities(self, caps):
365 def _restrictcapabilities(self, caps):
384 if self.ui.configbool('experimental', 'bundle2-advertise', True):
366 if self.ui.configbool('experimental', 'bundle2-advertise', True):
385 caps = set(caps)
367 caps = set(caps)
386 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
368 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
387 caps.add('bundle2=' + urllib.quote(capsblob))
369 caps.add('bundle2=' + urllib.quote(capsblob))
388 return caps
370 return caps
389
371
390 def _applyopenerreqs(self):
372 def _applyopenerreqs(self):
391 self.svfs.options = dict((r, 1) for r in self.requirements
373 self.svfs.options = dict((r, 1) for r in self.requirements
392 if r in self.openerreqs)
374 if r in self.openerreqs)
393 # experimental config: format.chunkcachesize
375 # experimental config: format.chunkcachesize
394 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
376 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
395 if chunkcachesize is not None:
377 if chunkcachesize is not None:
396 self.svfs.options['chunkcachesize'] = chunkcachesize
378 self.svfs.options['chunkcachesize'] = chunkcachesize
397 # experimental config: format.maxchainlen
379 # experimental config: format.maxchainlen
398 maxchainlen = self.ui.configint('format', 'maxchainlen')
380 maxchainlen = self.ui.configint('format', 'maxchainlen')
399 if maxchainlen is not None:
381 if maxchainlen is not None:
400 self.svfs.options['maxchainlen'] = maxchainlen
382 self.svfs.options['maxchainlen'] = maxchainlen
401 # experimental config: format.manifestcachesize
383 # experimental config: format.manifestcachesize
402 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
384 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
403 if manifestcachesize is not None:
385 if manifestcachesize is not None:
404 self.svfs.options['manifestcachesize'] = manifestcachesize
386 self.svfs.options['manifestcachesize'] = manifestcachesize
405 # experimental config: format.aggressivemergedeltas
387 # experimental config: format.aggressivemergedeltas
406 aggressivemergedeltas = self.ui.configbool('format',
388 aggressivemergedeltas = self.ui.configbool('format',
407 'aggressivemergedeltas', False)
389 'aggressivemergedeltas', False)
408 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
390 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
409 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
391 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
410
392
411 def _writerequirements(self):
393 def _writerequirements(self):
412 scmutil.writerequires(self.vfs, self.requirements)
394 scmutil.writerequires(self.vfs, self.requirements)
413
395
414 def _checknested(self, path):
396 def _checknested(self, path):
415 """Determine if path is a legal nested repository."""
397 """Determine if path is a legal nested repository."""
416 if not path.startswith(self.root):
398 if not path.startswith(self.root):
417 return False
399 return False
418 subpath = path[len(self.root) + 1:]
400 subpath = path[len(self.root) + 1:]
419 normsubpath = util.pconvert(subpath)
401 normsubpath = util.pconvert(subpath)
420
402
421 # XXX: Checking against the current working copy is wrong in
403 # XXX: Checking against the current working copy is wrong in
422 # the sense that it can reject things like
404 # the sense that it can reject things like
423 #
405 #
424 # $ hg cat -r 10 sub/x.txt
406 # $ hg cat -r 10 sub/x.txt
425 #
407 #
426 # if sub/ is no longer a subrepository in the working copy
408 # if sub/ is no longer a subrepository in the working copy
427 # parent revision.
409 # parent revision.
428 #
410 #
429 # However, it can of course also allow things that would have
411 # However, it can of course also allow things that would have
430 # been rejected before, such as the above cat command if sub/
412 # been rejected before, such as the above cat command if sub/
431 # is a subrepository now, but was a normal directory before.
413 # is a subrepository now, but was a normal directory before.
432 # The old path auditor would have rejected by mistake since it
414 # The old path auditor would have rejected by mistake since it
433 # panics when it sees sub/.hg/.
415 # panics when it sees sub/.hg/.
434 #
416 #
435 # All in all, checking against the working copy seems sensible
417 # All in all, checking against the working copy seems sensible
436 # since we want to prevent access to nested repositories on
418 # since we want to prevent access to nested repositories on
437 # the filesystem *now*.
419 # the filesystem *now*.
438 ctx = self[None]
420 ctx = self[None]
439 parts = util.splitpath(subpath)
421 parts = util.splitpath(subpath)
440 while parts:
422 while parts:
441 prefix = '/'.join(parts)
423 prefix = '/'.join(parts)
442 if prefix in ctx.substate:
424 if prefix in ctx.substate:
443 if prefix == normsubpath:
425 if prefix == normsubpath:
444 return True
426 return True
445 else:
427 else:
446 sub = ctx.sub(prefix)
428 sub = ctx.sub(prefix)
447 return sub.checknested(subpath[len(prefix) + 1:])
429 return sub.checknested(subpath[len(prefix) + 1:])
448 else:
430 else:
449 parts.pop()
431 parts.pop()
450 return False
432 return False
451
433
452 def peer(self):
434 def peer(self):
453 return localpeer(self) # not cached to avoid reference cycle
435 return localpeer(self) # not cached to avoid reference cycle
454
436
455 def unfiltered(self):
437 def unfiltered(self):
456 """Return unfiltered version of the repository
438 """Return unfiltered version of the repository
457
439
458 Intended to be overwritten by filtered repo."""
440 Intended to be overwritten by filtered repo."""
459 return self
441 return self
460
442
461 def filtered(self, name):
443 def filtered(self, name):
462 """Return a filtered version of a repository"""
444 """Return a filtered version of a repository"""
463 # build a new class with the mixin and the current class
445 # build a new class with the mixin and the current class
464 # (possibly subclass of the repo)
446 # (possibly subclass of the repo)
465 class proxycls(repoview.repoview, self.unfiltered().__class__):
447 class proxycls(repoview.repoview, self.unfiltered().__class__):
466 pass
448 pass
467 return proxycls(self, name)
449 return proxycls(self, name)
468
450
469 @repofilecache('bookmarks', 'bookmarks.current')
451 @repofilecache('bookmarks', 'bookmarks.current')
470 def _bookmarks(self):
452 def _bookmarks(self):
471 return bookmarks.bmstore(self)
453 return bookmarks.bmstore(self)
472
454
473 @property
455 @property
474 def _activebookmark(self):
456 def _activebookmark(self):
475 return self._bookmarks.active
457 return self._bookmarks.active
476
458
477 def bookmarkheads(self, bookmark):
459 def bookmarkheads(self, bookmark):
478 name = bookmark.split('@', 1)[0]
460 name = bookmark.split('@', 1)[0]
479 heads = []
461 heads = []
480 for mark, n in self._bookmarks.iteritems():
462 for mark, n in self._bookmarks.iteritems():
481 if mark.split('@', 1)[0] == name:
463 if mark.split('@', 1)[0] == name:
482 heads.append(n)
464 heads.append(n)
483 return heads
465 return heads
484
466
485 # _phaserevs and _phasesets depend on changelog. what we need is to
467 # _phaserevs and _phasesets depend on changelog. what we need is to
486 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
468 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
487 # can't be easily expressed in filecache mechanism.
469 # can't be easily expressed in filecache mechanism.
488 @storecache('phaseroots', '00changelog.i')
470 @storecache('phaseroots', '00changelog.i')
489 def _phasecache(self):
471 def _phasecache(self):
490 return phases.phasecache(self, self._phasedefaults)
472 return phases.phasecache(self, self._phasedefaults)
491
473
492 @storecache('obsstore')
474 @storecache('obsstore')
493 def obsstore(self):
475 def obsstore(self):
494 # read default format for new obsstore.
476 # read default format for new obsstore.
495 # developer config: format.obsstore-version
477 # developer config: format.obsstore-version
496 defaultformat = self.ui.configint('format', 'obsstore-version', None)
478 defaultformat = self.ui.configint('format', 'obsstore-version', None)
497 # rely on obsstore class default when possible.
479 # rely on obsstore class default when possible.
498 kwargs = {}
480 kwargs = {}
499 if defaultformat is not None:
481 if defaultformat is not None:
500 kwargs['defaultformat'] = defaultformat
482 kwargs['defaultformat'] = defaultformat
501 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
483 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
502 store = obsolete.obsstore(self.svfs, readonly=readonly,
484 store = obsolete.obsstore(self.svfs, readonly=readonly,
503 **kwargs)
485 **kwargs)
504 if store and readonly:
486 if store and readonly:
505 self.ui.warn(
487 self.ui.warn(
506 _('obsolete feature not enabled but %i markers found!\n')
488 _('obsolete feature not enabled but %i markers found!\n')
507 % len(list(store)))
489 % len(list(store)))
508 return store
490 return store
509
491
510 @storecache('00changelog.i')
492 @storecache('00changelog.i')
511 def changelog(self):
493 def changelog(self):
512 c = changelog.changelog(self.svfs)
494 c = changelog.changelog(self.svfs)
513 if 'HG_PENDING' in os.environ:
495 if 'HG_PENDING' in os.environ:
514 p = os.environ['HG_PENDING']
496 p = os.environ['HG_PENDING']
515 if p.startswith(self.root):
497 if p.startswith(self.root):
516 c.readpending('00changelog.i.a')
498 c.readpending('00changelog.i.a')
517 return c
499 return c
518
500
519 @storecache('00manifest.i')
501 @storecache('00manifest.i')
520 def manifest(self):
502 def manifest(self):
521 return manifest.manifest(self.svfs)
503 return manifest.manifest(self.svfs)
522
504
523 def dirlog(self, dir):
505 def dirlog(self, dir):
524 return self.manifest.dirlog(dir)
506 return self.manifest.dirlog(dir)
525
507
526 @repofilecache('dirstate')
508 @repofilecache('dirstate')
527 def dirstate(self):
509 def dirstate(self):
528 return dirstate.dirstate(self.vfs, self.ui, self.root,
510 return dirstate.dirstate(self.vfs, self.ui, self.root,
529 self._dirstatevalidate)
511 self._dirstatevalidate)
530
512
531 def _dirstatevalidate(self, node):
513 def _dirstatevalidate(self, node):
532 try:
514 try:
533 self.changelog.rev(node)
515 self.changelog.rev(node)
534 return node
516 return node
535 except error.LookupError:
517 except error.LookupError:
536 if not self._dirstatevalidatewarned:
518 if not self._dirstatevalidatewarned:
537 self._dirstatevalidatewarned = True
519 self._dirstatevalidatewarned = True
538 self.ui.warn(_("warning: ignoring unknown"
520 self.ui.warn(_("warning: ignoring unknown"
539 " working parent %s!\n") % short(node))
521 " working parent %s!\n") % short(node))
540 return nullid
522 return nullid
541
523
542 def __getitem__(self, changeid):
524 def __getitem__(self, changeid):
543 if changeid is None or changeid == wdirrev:
525 if changeid is None or changeid == wdirrev:
544 return context.workingctx(self)
526 return context.workingctx(self)
545 if isinstance(changeid, slice):
527 if isinstance(changeid, slice):
546 return [context.changectx(self, i)
528 return [context.changectx(self, i)
547 for i in xrange(*changeid.indices(len(self)))
529 for i in xrange(*changeid.indices(len(self)))
548 if i not in self.changelog.filteredrevs]
530 if i not in self.changelog.filteredrevs]
549 return context.changectx(self, changeid)
531 return context.changectx(self, changeid)
550
532
551 def __contains__(self, changeid):
533 def __contains__(self, changeid):
552 try:
534 try:
553 self[changeid]
535 self[changeid]
554 return True
536 return True
555 except error.RepoLookupError:
537 except error.RepoLookupError:
556 return False
538 return False
557
539
558 def __nonzero__(self):
540 def __nonzero__(self):
559 return True
541 return True
560
542
561 def __len__(self):
543 def __len__(self):
562 return len(self.changelog)
544 return len(self.changelog)
563
545
564 def __iter__(self):
546 def __iter__(self):
565 return iter(self.changelog)
547 return iter(self.changelog)
566
548
567 def revs(self, expr, *args):
549 def revs(self, expr, *args):
568 '''Find revisions matching a revset.
550 '''Find revisions matching a revset.
569
551
570 The revset is specified as a string ``expr`` that may contain
552 The revset is specified as a string ``expr`` that may contain
571 %-formatting to escape certain types. See ``revset.formatspec``.
553 %-formatting to escape certain types. See ``revset.formatspec``.
572
554
573 Return a revset.abstractsmartset, which is a list-like interface
555 Return a revset.abstractsmartset, which is a list-like interface
574 that contains integer revisions.
556 that contains integer revisions.
575 '''
557 '''
576 expr = revset.formatspec(expr, *args)
558 expr = revset.formatspec(expr, *args)
577 m = revset.match(None, expr)
559 m = revset.match(None, expr)
578 return m(self)
560 return m(self)
579
561
580 def set(self, expr, *args):
562 def set(self, expr, *args):
581 '''Find revisions matching a revset and emit changectx instances.
563 '''Find revisions matching a revset and emit changectx instances.
582
564
583 This is a convenience wrapper around ``revs()`` that iterates the
565 This is a convenience wrapper around ``revs()`` that iterates the
584 result and is a generator of changectx instances.
566 result and is a generator of changectx instances.
585 '''
567 '''
586 for r in self.revs(expr, *args):
568 for r in self.revs(expr, *args):
587 yield self[r]
569 yield self[r]
588
570
589 def url(self):
571 def url(self):
590 return 'file:' + self.root
572 return 'file:' + self.root
591
573
592 def hook(self, name, throw=False, **args):
574 def hook(self, name, throw=False, **args):
593 """Call a hook, passing this repo instance.
575 """Call a hook, passing this repo instance.
594
576
595 This a convenience method to aid invoking hooks. Extensions likely
577 This a convenience method to aid invoking hooks. Extensions likely
596 won't call this unless they have registered a custom hook or are
578 won't call this unless they have registered a custom hook or are
597 replacing code that is expected to call a hook.
579 replacing code that is expected to call a hook.
598 """
580 """
599 return hook.hook(self.ui, self, name, throw, **args)
581 return hook.hook(self.ui, self, name, throw, **args)
600
582
601 @unfilteredmethod
583 @unfilteredmethod
602 def _tag(self, names, node, message, local, user, date, extra=None,
584 def _tag(self, names, node, message, local, user, date, extra=None,
603 editor=False):
585 editor=False):
604 if isinstance(names, str):
586 if isinstance(names, str):
605 names = (names,)
587 names = (names,)
606
588
607 branches = self.branchmap()
589 branches = self.branchmap()
608 for name in names:
590 for name in names:
609 self.hook('pretag', throw=True, node=hex(node), tag=name,
591 self.hook('pretag', throw=True, node=hex(node), tag=name,
610 local=local)
592 local=local)
611 if name in branches:
593 if name in branches:
612 self.ui.warn(_("warning: tag %s conflicts with existing"
594 self.ui.warn(_("warning: tag %s conflicts with existing"
613 " branch name\n") % name)
595 " branch name\n") % name)
614
596
615 def writetags(fp, names, munge, prevtags):
597 def writetags(fp, names, munge, prevtags):
616 fp.seek(0, 2)
598 fp.seek(0, 2)
617 if prevtags and prevtags[-1] != '\n':
599 if prevtags and prevtags[-1] != '\n':
618 fp.write('\n')
600 fp.write('\n')
619 for name in names:
601 for name in names:
620 if munge:
602 if munge:
621 m = munge(name)
603 m = munge(name)
622 else:
604 else:
623 m = name
605 m = name
624
606
625 if (self._tagscache.tagtypes and
607 if (self._tagscache.tagtypes and
626 name in self._tagscache.tagtypes):
608 name in self._tagscache.tagtypes):
627 old = self.tags().get(name, nullid)
609 old = self.tags().get(name, nullid)
628 fp.write('%s %s\n' % (hex(old), m))
610 fp.write('%s %s\n' % (hex(old), m))
629 fp.write('%s %s\n' % (hex(node), m))
611 fp.write('%s %s\n' % (hex(node), m))
630 fp.close()
612 fp.close()
631
613
632 prevtags = ''
614 prevtags = ''
633 if local:
615 if local:
634 try:
616 try:
635 fp = self.vfs('localtags', 'r+')
617 fp = self.vfs('localtags', 'r+')
636 except IOError:
618 except IOError:
637 fp = self.vfs('localtags', 'a')
619 fp = self.vfs('localtags', 'a')
638 else:
620 else:
639 prevtags = fp.read()
621 prevtags = fp.read()
640
622
641 # local tags are stored in the current charset
623 # local tags are stored in the current charset
642 writetags(fp, names, None, prevtags)
624 writetags(fp, names, None, prevtags)
643 for name in names:
625 for name in names:
644 self.hook('tag', node=hex(node), tag=name, local=local)
626 self.hook('tag', node=hex(node), tag=name, local=local)
645 return
627 return
646
628
647 try:
629 try:
648 fp = self.wfile('.hgtags', 'rb+')
630 fp = self.wfile('.hgtags', 'rb+')
649 except IOError as e:
631 except IOError as e:
650 if e.errno != errno.ENOENT:
632 if e.errno != errno.ENOENT:
651 raise
633 raise
652 fp = self.wfile('.hgtags', 'ab')
634 fp = self.wfile('.hgtags', 'ab')
653 else:
635 else:
654 prevtags = fp.read()
636 prevtags = fp.read()
655
637
656 # committed tags are stored in UTF-8
638 # committed tags are stored in UTF-8
657 writetags(fp, names, encoding.fromlocal, prevtags)
639 writetags(fp, names, encoding.fromlocal, prevtags)
658
640
659 fp.close()
641 fp.close()
660
642
661 self.invalidatecaches()
643 self.invalidatecaches()
662
644
663 if '.hgtags' not in self.dirstate:
645 if '.hgtags' not in self.dirstate:
664 self[None].add(['.hgtags'])
646 self[None].add(['.hgtags'])
665
647
666 m = matchmod.exact(self.root, '', ['.hgtags'])
648 m = matchmod.exact(self.root, '', ['.hgtags'])
667 tagnode = self.commit(message, user, date, extra=extra, match=m,
649 tagnode = self.commit(message, user, date, extra=extra, match=m,
668 editor=editor)
650 editor=editor)
669
651
670 for name in names:
652 for name in names:
671 self.hook('tag', node=hex(node), tag=name, local=local)
653 self.hook('tag', node=hex(node), tag=name, local=local)
672
654
673 return tagnode
655 return tagnode
674
656
675 def tag(self, names, node, message, local, user, date, editor=False):
657 def tag(self, names, node, message, local, user, date, editor=False):
676 '''tag a revision with one or more symbolic names.
658 '''tag a revision with one or more symbolic names.
677
659
678 names is a list of strings or, when adding a single tag, names may be a
660 names is a list of strings or, when adding a single tag, names may be a
679 string.
661 string.
680
662
681 if local is True, the tags are stored in a per-repository file.
663 if local is True, the tags are stored in a per-repository file.
682 otherwise, they are stored in the .hgtags file, and a new
664 otherwise, they are stored in the .hgtags file, and a new
683 changeset is committed with the change.
665 changeset is committed with the change.
684
666
685 keyword arguments:
667 keyword arguments:
686
668
687 local: whether to store tags in non-version-controlled file
669 local: whether to store tags in non-version-controlled file
688 (default False)
670 (default False)
689
671
690 message: commit message to use if committing
672 message: commit message to use if committing
691
673
692 user: name of user to use if committing
674 user: name of user to use if committing
693
675
694 date: date tuple to use if committing'''
676 date: date tuple to use if committing'''
695
677
696 if not local:
678 if not local:
697 m = matchmod.exact(self.root, '', ['.hgtags'])
679 m = matchmod.exact(self.root, '', ['.hgtags'])
698 if any(self.status(match=m, unknown=True, ignored=True)):
680 if any(self.status(match=m, unknown=True, ignored=True)):
699 raise error.Abort(_('working copy of .hgtags is changed'),
681 raise error.Abort(_('working copy of .hgtags is changed'),
700 hint=_('please commit .hgtags manually'))
682 hint=_('please commit .hgtags manually'))
701
683
702 self.tags() # instantiate the cache
684 self.tags() # instantiate the cache
703 self._tag(names, node, message, local, user, date, editor=editor)
685 self._tag(names, node, message, local, user, date, editor=editor)
704
686
705 @filteredpropertycache
687 @filteredpropertycache
706 def _tagscache(self):
688 def _tagscache(self):
707 '''Returns a tagscache object that contains various tags related
689 '''Returns a tagscache object that contains various tags related
708 caches.'''
690 caches.'''
709
691
710 # This simplifies its cache management by having one decorated
692 # This simplifies its cache management by having one decorated
711 # function (this one) and the rest simply fetch things from it.
693 # function (this one) and the rest simply fetch things from it.
712 class tagscache(object):
694 class tagscache(object):
713 def __init__(self):
695 def __init__(self):
714 # These two define the set of tags for this repository. tags
696 # These two define the set of tags for this repository. tags
715 # maps tag name to node; tagtypes maps tag name to 'global' or
697 # maps tag name to node; tagtypes maps tag name to 'global' or
716 # 'local'. (Global tags are defined by .hgtags across all
698 # 'local'. (Global tags are defined by .hgtags across all
717 # heads, and local tags are defined in .hg/localtags.)
699 # heads, and local tags are defined in .hg/localtags.)
718 # They constitute the in-memory cache of tags.
700 # They constitute the in-memory cache of tags.
719 self.tags = self.tagtypes = None
701 self.tags = self.tagtypes = None
720
702
721 self.nodetagscache = self.tagslist = None
703 self.nodetagscache = self.tagslist = None
722
704
723 cache = tagscache()
705 cache = tagscache()
724 cache.tags, cache.tagtypes = self._findtags()
706 cache.tags, cache.tagtypes = self._findtags()
725
707
726 return cache
708 return cache
727
709
728 def tags(self):
710 def tags(self):
729 '''return a mapping of tag to node'''
711 '''return a mapping of tag to node'''
730 t = {}
712 t = {}
731 if self.changelog.filteredrevs:
713 if self.changelog.filteredrevs:
732 tags, tt = self._findtags()
714 tags, tt = self._findtags()
733 else:
715 else:
734 tags = self._tagscache.tags
716 tags = self._tagscache.tags
735 for k, v in tags.iteritems():
717 for k, v in tags.iteritems():
736 try:
718 try:
737 # ignore tags to unknown nodes
719 # ignore tags to unknown nodes
738 self.changelog.rev(v)
720 self.changelog.rev(v)
739 t[k] = v
721 t[k] = v
740 except (error.LookupError, ValueError):
722 except (error.LookupError, ValueError):
741 pass
723 pass
742 return t
724 return t
743
725
744 def _findtags(self):
726 def _findtags(self):
745 '''Do the hard work of finding tags. Return a pair of dicts
727 '''Do the hard work of finding tags. Return a pair of dicts
746 (tags, tagtypes) where tags maps tag name to node, and tagtypes
728 (tags, tagtypes) where tags maps tag name to node, and tagtypes
747 maps tag name to a string like \'global\' or \'local\'.
729 maps tag name to a string like \'global\' or \'local\'.
748 Subclasses or extensions are free to add their own tags, but
730 Subclasses or extensions are free to add their own tags, but
749 should be aware that the returned dicts will be retained for the
731 should be aware that the returned dicts will be retained for the
750 duration of the localrepo object.'''
732 duration of the localrepo object.'''
751
733
752 # XXX what tagtype should subclasses/extensions use? Currently
734 # XXX what tagtype should subclasses/extensions use? Currently
753 # mq and bookmarks add tags, but do not set the tagtype at all.
735 # mq and bookmarks add tags, but do not set the tagtype at all.
754 # Should each extension invent its own tag type? Should there
736 # Should each extension invent its own tag type? Should there
755 # be one tagtype for all such "virtual" tags? Or is the status
737 # be one tagtype for all such "virtual" tags? Or is the status
756 # quo fine?
738 # quo fine?
757
739
758 alltags = {} # map tag name to (node, hist)
740 alltags = {} # map tag name to (node, hist)
759 tagtypes = {}
741 tagtypes = {}
760
742
761 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
743 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
762 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
744 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
763
745
764 # Build the return dicts. Have to re-encode tag names because
746 # Build the return dicts. Have to re-encode tag names because
765 # the tags module always uses UTF-8 (in order not to lose info
747 # the tags module always uses UTF-8 (in order not to lose info
766 # writing to the cache), but the rest of Mercurial wants them in
748 # writing to the cache), but the rest of Mercurial wants them in
767 # local encoding.
749 # local encoding.
768 tags = {}
750 tags = {}
769 for (name, (node, hist)) in alltags.iteritems():
751 for (name, (node, hist)) in alltags.iteritems():
770 if node != nullid:
752 if node != nullid:
771 tags[encoding.tolocal(name)] = node
753 tags[encoding.tolocal(name)] = node
772 tags['tip'] = self.changelog.tip()
754 tags['tip'] = self.changelog.tip()
773 tagtypes = dict([(encoding.tolocal(name), value)
755 tagtypes = dict([(encoding.tolocal(name), value)
774 for (name, value) in tagtypes.iteritems()])
756 for (name, value) in tagtypes.iteritems()])
775 return (tags, tagtypes)
757 return (tags, tagtypes)
776
758
777 def tagtype(self, tagname):
759 def tagtype(self, tagname):
778 '''
760 '''
779 return the type of the given tag. result can be:
761 return the type of the given tag. result can be:
780
762
781 'local' : a local tag
763 'local' : a local tag
782 'global' : a global tag
764 'global' : a global tag
783 None : tag does not exist
765 None : tag does not exist
784 '''
766 '''
785
767
786 return self._tagscache.tagtypes.get(tagname)
768 return self._tagscache.tagtypes.get(tagname)
787
769
788 def tagslist(self):
770 def tagslist(self):
789 '''return a list of tags ordered by revision'''
771 '''return a list of tags ordered by revision'''
790 if not self._tagscache.tagslist:
772 if not self._tagscache.tagslist:
791 l = []
773 l = []
792 for t, n in self.tags().iteritems():
774 for t, n in self.tags().iteritems():
793 l.append((self.changelog.rev(n), t, n))
775 l.append((self.changelog.rev(n), t, n))
794 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
776 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
795
777
796 return self._tagscache.tagslist
778 return self._tagscache.tagslist
797
779
798 def nodetags(self, node):
780 def nodetags(self, node):
799 '''return the tags associated with a node'''
781 '''return the tags associated with a node'''
800 if not self._tagscache.nodetagscache:
782 if not self._tagscache.nodetagscache:
801 nodetagscache = {}
783 nodetagscache = {}
802 for t, n in self._tagscache.tags.iteritems():
784 for t, n in self._tagscache.tags.iteritems():
803 nodetagscache.setdefault(n, []).append(t)
785 nodetagscache.setdefault(n, []).append(t)
804 for tags in nodetagscache.itervalues():
786 for tags in nodetagscache.itervalues():
805 tags.sort()
787 tags.sort()
806 self._tagscache.nodetagscache = nodetagscache
788 self._tagscache.nodetagscache = nodetagscache
807 return self._tagscache.nodetagscache.get(node, [])
789 return self._tagscache.nodetagscache.get(node, [])
808
790
809 def nodebookmarks(self, node):
791 def nodebookmarks(self, node):
810 """return the list of bookmarks pointing to the specified node"""
792 """return the list of bookmarks pointing to the specified node"""
811 marks = []
793 marks = []
812 for bookmark, n in self._bookmarks.iteritems():
794 for bookmark, n in self._bookmarks.iteritems():
813 if n == node:
795 if n == node:
814 marks.append(bookmark)
796 marks.append(bookmark)
815 return sorted(marks)
797 return sorted(marks)
816
798
817 def branchmap(self):
799 def branchmap(self):
818 '''returns a dictionary {branch: [branchheads]} with branchheads
800 '''returns a dictionary {branch: [branchheads]} with branchheads
819 ordered by increasing revision number'''
801 ordered by increasing revision number'''
820 branchmap.updatecache(self)
802 branchmap.updatecache(self)
821 return self._branchcaches[self.filtername]
803 return self._branchcaches[self.filtername]
822
804
823 @unfilteredmethod
805 @unfilteredmethod
824 def revbranchcache(self):
806 def revbranchcache(self):
825 if not self._revbranchcache:
807 if not self._revbranchcache:
826 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
808 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
827 return self._revbranchcache
809 return self._revbranchcache
828
810
829 def branchtip(self, branch, ignoremissing=False):
811 def branchtip(self, branch, ignoremissing=False):
830 '''return the tip node for a given branch
812 '''return the tip node for a given branch
831
813
832 If ignoremissing is True, then this method will not raise an error.
814 If ignoremissing is True, then this method will not raise an error.
833 This is helpful for callers that only expect None for a missing branch
815 This is helpful for callers that only expect None for a missing branch
834 (e.g. namespace).
816 (e.g. namespace).
835
817
836 '''
818 '''
837 try:
819 try:
838 return self.branchmap().branchtip(branch)
820 return self.branchmap().branchtip(branch)
839 except KeyError:
821 except KeyError:
840 if not ignoremissing:
822 if not ignoremissing:
841 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
823 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
842 else:
824 else:
843 pass
825 pass
844
826
845 def lookup(self, key):
827 def lookup(self, key):
846 return self[key].node()
828 return self[key].node()
847
829
848 def lookupbranch(self, key, remote=None):
830 def lookupbranch(self, key, remote=None):
849 repo = remote or self
831 repo = remote or self
850 if key in repo.branchmap():
832 if key in repo.branchmap():
851 return key
833 return key
852
834
853 repo = (remote and remote.local()) and remote or self
835 repo = (remote and remote.local()) and remote or self
854 return repo[key].branch()
836 return repo[key].branch()
855
837
856 def known(self, nodes):
838 def known(self, nodes):
857 cl = self.changelog
839 cl = self.changelog
858 nm = cl.nodemap
840 nm = cl.nodemap
859 filtered = cl.filteredrevs
841 filtered = cl.filteredrevs
860 result = []
842 result = []
861 for n in nodes:
843 for n in nodes:
862 r = nm.get(n)
844 r = nm.get(n)
863 resp = not (r is None or r in filtered)
845 resp = not (r is None or r in filtered)
864 result.append(resp)
846 result.append(resp)
865 return result
847 return result
866
848
867 def local(self):
849 def local(self):
868 return self
850 return self
869
851
870 def publishing(self):
852 def publishing(self):
871 # it's safe (and desirable) to trust the publish flag unconditionally
853 # it's safe (and desirable) to trust the publish flag unconditionally
872 # so that we don't finalize changes shared between users via ssh or nfs
854 # so that we don't finalize changes shared between users via ssh or nfs
873 return self.ui.configbool('phases', 'publish', True, untrusted=True)
855 return self.ui.configbool('phases', 'publish', True, untrusted=True)
874
856
875 def cancopy(self):
857 def cancopy(self):
876 # so statichttprepo's override of local() works
858 # so statichttprepo's override of local() works
877 if not self.local():
859 if not self.local():
878 return False
860 return False
879 if not self.publishing():
861 if not self.publishing():
880 return True
862 return True
881 # if publishing we can't copy if there is filtered content
863 # if publishing we can't copy if there is filtered content
882 return not self.filtered('visible').changelog.filteredrevs
864 return not self.filtered('visible').changelog.filteredrevs
883
865
884 def shared(self):
866 def shared(self):
885 '''the type of shared repository (None if not shared)'''
867 '''the type of shared repository (None if not shared)'''
886 if self.sharedpath != self.path:
868 if self.sharedpath != self.path:
887 return 'store'
869 return 'store'
888 return None
870 return None
889
871
890 def join(self, f, *insidef):
872 def join(self, f, *insidef):
891 return self.vfs.join(os.path.join(f, *insidef))
873 return self.vfs.join(os.path.join(f, *insidef))
892
874
893 def wjoin(self, f, *insidef):
875 def wjoin(self, f, *insidef):
894 return self.vfs.reljoin(self.root, f, *insidef)
876 return self.vfs.reljoin(self.root, f, *insidef)
895
877
896 def file(self, f):
878 def file(self, f):
897 if f[0] == '/':
879 if f[0] == '/':
898 f = f[1:]
880 f = f[1:]
899 return filelog.filelog(self.svfs, f)
881 return filelog.filelog(self.svfs, f)
900
882
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        msg = 'repo.parents() is deprecated, use repo[%r].parents()' % changeid
        self.ui.deprecwarn(msg, '3.7')
        return self[changeid].parents()

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records: the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them only
            # for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

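    # Illustrative sketch (not part of upstream localrepo.py): reading a file
    # as of a particular changeset through a filectx:
    #
    #     fctx = repo.filectx('setup.py', changeid='tip')
    #     contents = fctx.data()     # file contents at tip
    #     fnode = fctx.filenode()    # backing filelog node
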
    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

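    # Illustrative sketch (not part of upstream localrepo.py): _loadfilter
    # reads pattern/command pairs from a config section such as [encode] or
    # [decode], e.g. in an hgrc:
    #
    #     [encode]
    #     **.txt = dos2unix    # normalize line endings on the way in
    #     [decode]
    #     **.txt = unix2dos    # restore CRLF in the working directory
    #
    # A command of '!' disables filtering for that pattern.
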
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

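    # Illustrative sketch (not part of upstream localrepo.py): an extension
    # can register a named data filter; _loadfilter dispatches to it when a
    # configured filter command starts with that name:
    #
    #     def upperfilter(s, params, **kwargs):
    #         return s.upper()
    #     repo.adddatafilter('upper:', upperfilter)
    #
    # With "[encode] **.txt = upper:" configured, wread() would then pass
    # .txt files through upperfilter.
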
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is None or not l.held:
                self.ui.develwarn('transaction with no lock')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # make journal.dirstate contain in-memory changes at this point
        self.dirstate.write(None)

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + util.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out at transaction close if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # prevent in-memory changes from being written out at
                # the end of the outer wlock scope or so
                repo.dirstate.invalidate()

                # discard all changes (including ones already written
                # out) in this transaction
                repo.vfs.rename('journal.dirstate', 'dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

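    # Illustrative sketch (not part of upstream localrepo.py): callers hold
    # the store lock, open a transaction, and close or release it explicitly
    # (commitctx() below follows this pattern):
    #
    #     lock = repo.lock()
    #     try:
    #         tr = repo.transaction('my-operation')
    #         try:
    #             ...  # append to revlogs, move phase boundaries, etc.
    #             tr.close()    # commit the transaction
    #         finally:
    #             tr.release()  # rolls back unless close() was reached
    #     finally:
    #         lock.release()
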
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

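    # Illustrative sketch (not part of upstream localrepo.py): this is what
    # 'hg recover' invokes; True means an interrupted transaction was rolled
    # back, False means there was nothing to recover:
    #
    #     if not repo.recover():
    #         repo.ui.status('repository is already consistent\n')
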
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = cmdutil.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

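    # Illustrative sketch (not part of upstream localrepo.py): previewing a
    # rollback before performing it; rollback() returns 0 on success and 1
    # when no rollback information is available:
    #
    #     if repo.rollback(dryrun=True) == 0:
    #         repo.rollback()
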
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

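    # Illustrative sketch (not part of upstream localrepo.py): _afterlock is
    # how deferred work (like the txnclose hook above) is scheduled to run
    # once the outermost lock is released:
    #
    #     def notify():
    #         repo.ui.status('all locks released\n')
    #     repo._afterlock(notify)   # runs immediately if nothing is locked
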
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            l = self._lockref and self._lockref()
            if l is not None and l.held:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

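    # Illustrative sketch (not part of upstream localrepo.py): the lock
    # ordering documented above, wlock before lock, to avoid deadlocks:
    #
    #     wlock = repo.wlock()
    #     lock = repo.lock()
    #     try:
    #         ...  # modify both the working copy and the store
    #     finally:
    #         release(lock, wlock)  # release in reverse acquisition order
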
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and (match.isexact() or match.prefix()):
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)

            if list(ms.unresolved()):
                raise error.Abort(_('unresolved merge conflicts '
                                    '(see "hg help resolve")'))
            if ms.mdstate() != 's' or list(ms.driverresolved()):
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped by the time
            # the hook runs after the lock is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

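    # Illustrative sketch (not part of upstream localrepo.py): committing the
    # working directory programmatically, roughly what 'hg commit -m' does
    # after option handling:
    #
    #     from mercurial import hg, ui as uimod
    #     repo = hg.repository(uimod.ui(), '/path/to/repo')
    #     node = repo.commit(text='example message', user='alice')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
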
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

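    # Illustrative sketch (not part of upstream localrepo.py): commitctx is
    # the lower-level entry point used by commit() above; callers such as
    # import/convert code build an in-memory context and pass it in directly:
    #
    #     from mercurial import context
    #     def getfile(repo, memctx, path):
    #         return context.memfilectx(repo, path, 'new contents\n')
    #     mctx = context.memctx(repo, (repo['.'].node(), nullid),
    #                           'synthesized commit', ['afile.txt'], getfile,
    #                           user='alice')
    #     node = repo.commitctx(mctx)
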
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # out the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))
1788 branchmap.updatecache(self.filtered('served'))
1807
1789
1808 # Ensure the persistent tag cache is updated. Doing it now
1790 # Ensure the persistent tag cache is updated. Doing it now
1809 # means that the tag cache only has to worry about destroyed
1791 # means that the tag cache only has to worry about destroyed
1810 # heads immediately after a strip/rollback. That in turn
1792 # heads immediately after a strip/rollback. That in turn
1811 # guarantees that "cachetip == currenttip" (comparing both rev
1793 # guarantees that "cachetip == currenttip" (comparing both rev
1812 # and node) always means no nodes have been added or destroyed.
1794 # and node) always means no nodes have been added or destroyed.
1813
1795
1814 # XXX this is suboptimal when qrefresh'ing: we strip the current
1796 # XXX this is suboptimal when qrefresh'ing: we strip the current
1815 # head, refresh the tag cache, then immediately add a new head.
1797 # head, refresh the tag cache, then immediately add a new head.
1816 # But I think doing it this way is necessary for the "instant
1798 # But I think doing it this way is necessary for the "instant
1817 # tag cache retrieval" case to work.
1799 # tag cache retrieval" case to work.
1818 self.invalidate()
1800 self.invalidate()
1819
1801
1820 def walk(self, match, node=None):
1802 def walk(self, match, node=None):
1821 '''
1803 '''
1822 walk recursively through the directory tree or a given
1804 walk recursively through the directory tree or a given
1823 changeset, finding all files matched by the match
1805 changeset, finding all files matched by the match
1824 function
1806 function
1825 '''
1807 '''
1826 return self[node].walk(match)
1808 return self[node].walk(match)
1827
1809
1828 def status(self, node1='.', node2=None, match=None,
1810 def status(self, node1='.', node2=None, match=None,
1829 ignored=False, clean=False, unknown=False,
1811 ignored=False, clean=False, unknown=False,
1830 listsubrepos=False):
1812 listsubrepos=False):
1831 '''a convenience method that calls node1.status(node2)'''
1813 '''a convenience method that calls node1.status(node2)'''
1832 return self[node1].status(node2, match, ignored, clean, unknown,
1814 return self[node1].status(node2, match, ignored, clean, unknown,
1833 listsubrepos)
1815 listsubrepos)
1834
1816
1835 def heads(self, start=None):
1817 def heads(self, start=None):
1836 heads = self.changelog.heads(start)
1818 heads = self.changelog.heads(start)
1837 # sort the output in rev descending order
1819 # sort the output in rev descending order
1838 return sorted(heads, key=self.changelog.rev, reverse=True)
1820 return sorted(heads, key=self.changelog.rev, reverse=True)
1839
1821
1840 def branchheads(self, branch=None, start=None, closed=False):
1822 def branchheads(self, branch=None, start=None, closed=False):
1841 '''return a (possibly filtered) list of heads for the given branch
1823 '''return a (possibly filtered) list of heads for the given branch
1842
1824
1843 Heads are returned in topological order, from newest to oldest.
1825 Heads are returned in topological order, from newest to oldest.
1844 If branch is None, use the dirstate branch.
1826 If branch is None, use the dirstate branch.
1845 If start is not None, return only heads reachable from start.
1827 If start is not None, return only heads reachable from start.
1846 If closed is True, return heads that are marked as closed as well.
1828 If closed is True, return heads that are marked as closed as well.
1847 '''
1829 '''
1848 if branch is None:
1830 if branch is None:
1849 branch = self[None].branch()
1831 branch = self[None].branch()
1850 branches = self.branchmap()
1832 branches = self.branchmap()
1851 if branch not in branches:
1833 if branch not in branches:
1852 return []
1834 return []
1853 # the cache returns heads ordered lowest to highest
1835 # the cache returns heads ordered lowest to highest
1854 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1836 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1855 if start is not None:
1837 if start is not None:
1856 # filter out the heads that cannot be reached from startrev
1838 # filter out the heads that cannot be reached from startrev
1857 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1839 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1858 bheads = [h for h in bheads if h in fbheads]
1840 bheads = [h for h in bheads if h in fbheads]
1859 return bheads
1841 return bheads
1860
1842
1861 def branches(self, nodes):
1843 def branches(self, nodes):
1862 if not nodes:
1844 if not nodes:
1863 nodes = [self.changelog.tip()]
1845 nodes = [self.changelog.tip()]
1864 b = []
1846 b = []
1865 for n in nodes:
1847 for n in nodes:
1866 t = n
1848 t = n
1867 while True:
1849 while True:
1868 p = self.changelog.parents(n)
1850 p = self.changelog.parents(n)
1869 if p[1] != nullid or p[0] == nullid:
1851 if p[1] != nullid or p[0] == nullid:
1870 b.append((t, n, p[0], p[1]))
1852 b.append((t, n, p[0], p[1]))
1871 break
1853 break
1872 n = p[0]
1854 n = p[0]
1873 return b
1855 return b
1874
1856
1875 def between(self, pairs):
1857 def between(self, pairs):
1876 r = []
1858 r = []
1877
1859
1878 for top, bottom in pairs:
1860 for top, bottom in pairs:
1879 n, l, i = top, [], 0
1861 n, l, i = top, [], 0
1880 f = 1
1862 f = 1
1881
1863
1882 while n != bottom and n != nullid:
1864 while n != bottom and n != nullid:
1883 p = self.changelog.parents(n)[0]
1865 p = self.changelog.parents(n)[0]
1884 if i == f:
1866 if i == f:
1885 l.append(n)
1867 l.append(n)
1886 f = f * 2
1868 f = f * 2
1887 n = p
1869 n = p
1888 i += 1
1870 i += 1
1889
1871
1890 r.append(l)
1872 r.append(l)
1891
1873
1892 return r
1874 return r
1893
1875
1894 def checkpush(self, pushop):
1876 def checkpush(self, pushop):
1895 """Extensions can override this function if additional checks have
1877 """Extensions can override this function if additional checks have
1896 to be performed before pushing, or call it if they override push
1878 to be performed before pushing, or call it if they override push
1897 command.
1879 command.
1898 """
1880 """
1899 pass
1881 pass
1900
1882
1901 @unfilteredpropertycache
1883 @unfilteredpropertycache
1902 def prepushoutgoinghooks(self):
1884 def prepushoutgoinghooks(self):
1903 """Return util.hooks consists of "(repo, remote, outgoing)"
1885 """Return util.hooks consists of "(repo, remote, outgoing)"
1904 functions, which are called before pushing changesets.
1886 functions, which are called before pushing changesets.
1905 """
1887 """
1906 return util.hooks()
1888 return util.hooks()
1907
1889
1908 def pushkey(self, namespace, key, old, new):
1890 def pushkey(self, namespace, key, old, new):
1909 try:
1891 try:
1910 tr = self.currenttransaction()
1892 tr = self.currenttransaction()
1911 hookargs = {}
1893 hookargs = {}
1912 if tr is not None:
1894 if tr is not None:
1913 hookargs.update(tr.hookargs)
1895 hookargs.update(tr.hookargs)
1914 hookargs['namespace'] = namespace
1896 hookargs['namespace'] = namespace
1915 hookargs['key'] = key
1897 hookargs['key'] = key
1916 hookargs['old'] = old
1898 hookargs['old'] = old
1917 hookargs['new'] = new
1899 hookargs['new'] = new
1918 self.hook('prepushkey', throw=True, **hookargs)
1900 self.hook('prepushkey', throw=True, **hookargs)
1919 except error.HookAbort as exc:
1901 except error.HookAbort as exc:
1920 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1902 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1921 if exc.hint:
1903 if exc.hint:
1922 self.ui.write_err(_("(%s)\n") % exc.hint)
1904 self.ui.write_err(_("(%s)\n") % exc.hint)
1923 return False
1905 return False
1924 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1906 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1925 ret = pushkey.push(self, namespace, key, old, new)
1907 ret = pushkey.push(self, namespace, key, old, new)
1926 def runhook():
1908 def runhook():
1927 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1909 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1928 ret=ret)
1910 ret=ret)
1929 self._afterlock(runhook)
1911 self._afterlock(runhook)
1930 return ret
1912 return ret
1931
1913
1932 def listkeys(self, namespace):
1914 def listkeys(self, namespace):
1933 self.hook('prelistkeys', throw=True, namespace=namespace)
1915 self.hook('prelistkeys', throw=True, namespace=namespace)
1934 self.ui.debug('listing keys for "%s"\n' % namespace)
1916 self.ui.debug('listing keys for "%s"\n' % namespace)
1935 values = pushkey.list(self, namespace)
1917 values = pushkey.list(self, namespace)
1936 self.hook('listkeys', namespace=namespace, values=values)
1918 self.hook('listkeys', namespace=namespace, values=values)
1937 return values
1919 return values
1938
1920
1939 def debugwireargs(self, one, two, three=None, four=None, five=None):
1921 def debugwireargs(self, one, two, three=None, four=None, five=None):
1940 '''used to test argument passing over the wire'''
1922 '''used to test argument passing over the wire'''
1941 return "%s %s %s %s %s" % (one, two, three, four, five)
1923 return "%s %s %s %s %s" % (one, two, three, four, five)
1942
1924
1943 def savecommitmessage(self, text):
1925 def savecommitmessage(self, text):
1944 fp = self.vfs('last-message.txt', 'wb')
1926 fp = self.vfs('last-message.txt', 'wb')
1945 try:
1927 try:
1946 fp.write(text)
1928 fp.write(text)
1947 finally:
1929 finally:
1948 fp.close()
1930 fp.close()
1949 return self.pathto(fp.name[len(self.root) + 1:])
1931 return self.pathto(fp.name[len(self.root) + 1:])
1950
1932
1951 # used to avoid circular references so destructors work
1933 # used to avoid circular references so destructors work
1952 def aftertrans(files):
1934 def aftertrans(files):
1953 renamefiles = [tuple(t) for t in files]
1935 renamefiles = [tuple(t) for t in files]
1954 def a():
1936 def a():
1955 for vfs, src, dest in renamefiles:
1937 for vfs, src, dest in renamefiles:
1956 try:
1938 try:
1957 vfs.rename(src, dest)
1939 vfs.rename(src, dest)
1958 except OSError: # journal file does not yet exist
1940 except OSError: # journal file does not yet exist
1959 pass
1941 pass
1960 return a
1942 return a
1961
1943
1962 def undoname(fn):
1944 def undoname(fn):
1963 base, name = os.path.split(fn)
1945 base, name = os.path.split(fn)
1964 assert name.startswith('journal')
1946 assert name.startswith('journal')
1965 return os.path.join(base, name.replace('journal', 'undo', 1))
1947 return os.path.join(base, name.replace('journal', 'undo', 1))
1966
1948
1967 def instance(ui, path, create):
1949 def instance(ui, path, create):
1968 return localrepository(ui, util.urllocalpath(path), create)
1950 return localrepository(ui, util.urllocalpath(path), create)
1969
1951
1970 def islocal(path):
1952 def islocal(path):
1971 return True
1953 return True
1954
1955 def newreporequirements(repo):
1956 """Determine the set of requirements for a new local repository.
1957
1958 Extensions can wrap this function to specify custom requirements for
1959 new repositories.
1960 """
1961 ui = repo.ui
1962 requirements = set(['revlogv1'])
1963 if ui.configbool('format', 'usestore', True):
1964 requirements.add('store')
1965 if ui.configbool('format', 'usefncache', True):
1966 requirements.add('fncache')
1967 if ui.configbool('format', 'dotencode', True):
1968 requirements.add('dotencode')
1969
1970 if scmutil.gdinitconfig(ui):
1971 requirements.add('generaldelta')
1972 if ui.configbool('experimental', 'treemanifest', False):
1973 requirements.add('treemanifest')
1974 if ui.configbool('experimental', 'manifestv2', False):
1975 requirements.add('manifestv2')
1976
1977 return requirements
General Comments 0
You need to be logged in to leave comments. Login now