localrepo: add some comment about role of various vfs object...
Pierre-Yves David
r31144:afcc4b4a default
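
The commit adds comments in localrepository.__init__ documenting the roles of
the repository's three vfs objects. As a rough illustration (not part of the
change; assumes a default, non-shared repository layout at /path/to/repo):

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui(), '/path/to/repo')
    repo.wvfs.base   # '/path/to/repo'            - the working copy
    repo.vfs.base    # '/path/to/repo/.hg'        - repository content
    repo.svfs.base   # '/path/to/repo/.hg/store'  - the store (revlogs etc.)
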
@@ -1,2052 +1,2057 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
    wdirrev,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    repoview,
    revset,
    revsetlang,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

class repofilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())

class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'relshared', 'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        # vfs to access the working copy
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        # vfs to access the content of the repository
        self.vfs = None
        # vfs to access the store part of the repository
        self.svfs = None
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = scmutil.vfs(sharedpath, realpath=True)

            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas',
                                                   False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        # $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

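    # Illustrative sketch, not part of this file: a hypothetical helper
    # showing how unfiltered() and filtered() above are meant to be combined.
    # Filter names such as 'visible' and 'served' come from the repoview
    # module; 'served' is what localpeer uses above.
    def _example_headcounts(self):
        # heads seen through the 'visible' filter vs. the raw, unfiltered repo
        visible = self.filtered('visible')
        return len(visible.heads()), len(self.unfiltered().heads())
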
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if txnutil.mayhavepending(self.root):
            c.readpending('00changelog.i.a')
        return c

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self)
        else:
            m = revset.matchany(None, specs)
        return m(self)

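    # Illustrative sketch, not part of this file: typical use of revs() and
    # set() above, with %-formatting handled by revsetlang.formatspec ('%s'
    # escapes a string, '%d' a revision number). Hypothetical helper only.
    def _example_branchdescriptions(self, branch):
        # collect commit messages for every revision on the named branch
        return [ctx.description() for ctx in self.set('branch(%s)', branch)]
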
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise error.Abort(_('working copy of .hgtags is changed'),
                                  hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

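    # Illustrative sketch, not part of this file: creating a local tag with
    # tag() above. A local tag is written to .hg/localtags and creates no
    # changeset, so message, user, and date only matter when local is False.
    # Hypothetical helper only.
    def _example_localtag(self, name):
        # tag the current tip locally; no commit is made on this code path
        self.tag(name, self['tip'].node(), 'example message', True, None, None)
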
734 @filteredpropertycache
739 @filteredpropertycache
735 def _tagscache(self):
740 def _tagscache(self):
736 '''Returns a tagscache object that contains various tags related
741 '''Returns a tagscache object that contains various tags related
737 caches.'''
742 caches.'''
738
743
739 # This simplifies its cache management by having one decorated
744 # This simplifies its cache management by having one decorated
740 # function (this one) and the rest simply fetch things from it.
745 # function (this one) and the rest simply fetch things from it.
741 class tagscache(object):
746 class tagscache(object):
742 def __init__(self):
747 def __init__(self):
743 # These two define the set of tags for this repository. tags
748 # These two define the set of tags for this repository. tags
744 # maps tag name to node; tagtypes maps tag name to 'global' or
749 # maps tag name to node; tagtypes maps tag name to 'global' or
745 # 'local'. (Global tags are defined by .hgtags across all
750 # 'local'. (Global tags are defined by .hgtags across all
746 # heads, and local tags are defined in .hg/localtags.)
751 # heads, and local tags are defined in .hg/localtags.)
747 # They constitute the in-memory cache of tags.
752 # They constitute the in-memory cache of tags.
748 self.tags = self.tagtypes = None
753 self.tags = self.tagtypes = None
749
754
750 self.nodetagscache = self.tagslist = None
755 self.nodetagscache = self.tagslist = None
751
756
752 cache = tagscache()
757 cache = tagscache()
753 cache.tags, cache.tagtypes = self._findtags()
758 cache.tags, cache.tagtypes = self._findtags()
754
759
755 return cache
760 return cache
756
761
757 def tags(self):
762 def tags(self):
758 '''return a mapping of tag to node'''
763 '''return a mapping of tag to node'''
759 t = {}
764 t = {}
760 if self.changelog.filteredrevs:
765 if self.changelog.filteredrevs:
761 tags, tt = self._findtags()
766 tags, tt = self._findtags()
762 else:
767 else:
763 tags = self._tagscache.tags
768 tags = self._tagscache.tags
764 for k, v in tags.iteritems():
769 for k, v in tags.iteritems():
765 try:
770 try:
766 # ignore tags to unknown nodes
771 # ignore tags to unknown nodes
767 self.changelog.rev(v)
772 self.changelog.rev(v)
768 t[k] = v
773 t[k] = v
769 except (error.LookupError, ValueError):
774 except (error.LookupError, ValueError):
770 pass
775 pass
771 return t
776 return t
772
777
773 def _findtags(self):
778 def _findtags(self):
774 '''Do the hard work of finding tags. Return a pair of dicts
779 '''Do the hard work of finding tags. Return a pair of dicts
775 (tags, tagtypes) where tags maps tag name to node, and tagtypes
780 (tags, tagtypes) where tags maps tag name to node, and tagtypes
776 maps tag name to a string like \'global\' or \'local\'.
781 maps tag name to a string like \'global\' or \'local\'.
777 Subclasses or extensions are free to add their own tags, but
782 Subclasses or extensions are free to add their own tags, but
778 should be aware that the returned dicts will be retained for the
783 should be aware that the returned dicts will be retained for the
779 duration of the localrepo object.'''
784 duration of the localrepo object.'''
780
785
781 # XXX what tagtype should subclasses/extensions use? Currently
786 # XXX what tagtype should subclasses/extensions use? Currently
782 # mq and bookmarks add tags, but do not set the tagtype at all.
787 # mq and bookmarks add tags, but do not set the tagtype at all.
783 # Should each extension invent its own tag type? Should there
788 # Should each extension invent its own tag type? Should there
784 # be one tagtype for all such "virtual" tags? Or is the status
789 # be one tagtype for all such "virtual" tags? Or is the status
785 # quo fine?
790 # quo fine?
786
791
787 alltags = {} # map tag name to (node, hist)
792 alltags = {} # map tag name to (node, hist)
788 tagtypes = {}
793 tagtypes = {}
789
794
790 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
795 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
791 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
796 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
792
797
793 # Build the return dicts. Have to re-encode tag names because
798 # Build the return dicts. Have to re-encode tag names because
794 # the tags module always uses UTF-8 (in order not to lose info
799 # the tags module always uses UTF-8 (in order not to lose info
795 # writing to the cache), but the rest of Mercurial wants them in
800 # writing to the cache), but the rest of Mercurial wants them in
796 # local encoding.
801 # local encoding.
797 tags = {}
802 tags = {}
798 for (name, (node, hist)) in alltags.iteritems():
803 for (name, (node, hist)) in alltags.iteritems():
799 if node != nullid:
804 if node != nullid:
800 tags[encoding.tolocal(name)] = node
805 tags[encoding.tolocal(name)] = node
801 tags['tip'] = self.changelog.tip()
806 tags['tip'] = self.changelog.tip()
802 tagtypes = dict([(encoding.tolocal(name), value)
807 tagtypes = dict([(encoding.tolocal(name), value)
803 for (name, value) in tagtypes.iteritems()])
808 for (name, value) in tagtypes.iteritems()])
804 return (tags, tagtypes)
809 return (tags, tagtypes)
805
810
806 def tagtype(self, tagname):
811 def tagtype(self, tagname):
807 '''
812 '''
808 return the type of the given tag. result can be:
813 return the type of the given tag. result can be:
809
814
810 'local' : a local tag
815 'local' : a local tag
811 'global' : a global tag
816 'global' : a global tag
812 None : tag does not exist
817 None : tag does not exist
813 '''
818 '''
814
819
815 return self._tagscache.tagtypes.get(tagname)
820 return self._tagscache.tagtypes.get(tagname)
816
821
817 def tagslist(self):
822 def tagslist(self):
818 '''return a list of tags ordered by revision'''
823 '''return a list of tags ordered by revision'''
819 if not self._tagscache.tagslist:
824 if not self._tagscache.tagslist:
820 l = []
825 l = []
821 for t, n in self.tags().iteritems():
826 for t, n in self.tags().iteritems():
822 l.append((self.changelog.rev(n), t, n))
827 l.append((self.changelog.rev(n), t, n))
823 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
828 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
824
829
825 return self._tagscache.tagslist
830 return self._tagscache.tagslist
826
831
827 def nodetags(self, node):
832 def nodetags(self, node):
828 '''return the tags associated with a node'''
833 '''return the tags associated with a node'''
829 if not self._tagscache.nodetagscache:
834 if not self._tagscache.nodetagscache:
830 nodetagscache = {}
835 nodetagscache = {}
831 for t, n in self._tagscache.tags.iteritems():
836 for t, n in self._tagscache.tags.iteritems():
832 nodetagscache.setdefault(n, []).append(t)
837 nodetagscache.setdefault(n, []).append(t)
833 for tags in nodetagscache.itervalues():
838 for tags in nodetagscache.itervalues():
834 tags.sort()
839 tags.sort()
835 self._tagscache.nodetagscache = nodetagscache
840 self._tagscache.nodetagscache = nodetagscache
836 return self._tagscache.nodetagscache.get(node, [])
841 return self._tagscache.nodetagscache.get(node, [])
837
842
838 def nodebookmarks(self, node):
843 def nodebookmarks(self, node):
839 """return the list of bookmarks pointing to the specified node"""
844 """return the list of bookmarks pointing to the specified node"""
840 marks = []
845 marks = []
841 for bookmark, n in self._bookmarks.iteritems():
846 for bookmark, n in self._bookmarks.iteritems():
842 if n == node:
847 if n == node:
843 marks.append(bookmark)
848 marks.append(bookmark)
844 return sorted(marks)
849 return sorted(marks)
845
850
846 def branchmap(self):
851 def branchmap(self):
847 '''returns a dictionary {branch: [branchheads]} with branchheads
852 '''returns a dictionary {branch: [branchheads]} with branchheads
848 ordered by increasing revision number'''
853 ordered by increasing revision number'''
849 branchmap.updatecache(self)
854 branchmap.updatecache(self)
850 return self._branchcaches[self.filtername]
855 return self._branchcaches[self.filtername]
851
856
852 @unfilteredmethod
857 @unfilteredmethod
853 def revbranchcache(self):
858 def revbranchcache(self):
854 if not self._revbranchcache:
859 if not self._revbranchcache:
855 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
860 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
856 return self._revbranchcache
861 return self._revbranchcache
857
862
858 def branchtip(self, branch, ignoremissing=False):
863 def branchtip(self, branch, ignoremissing=False):
859 '''return the tip node for a given branch
864 '''return the tip node for a given branch
860
865
861 If ignoremissing is True, then this method will not raise an error.
866 If ignoremissing is True, then this method will not raise an error.
862 This is helpful for callers that only expect None for a missing branch
867 This is helpful for callers that only expect None for a missing branch
863 (e.g. namespace).
868 (e.g. namespace).
864
869
865 '''
870 '''
866 try:
871 try:
867 return self.branchmap().branchtip(branch)
872 return self.branchmap().branchtip(branch)
868 except KeyError:
873 except KeyError:
869 if not ignoremissing:
874 if not ignoremissing:
870 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
875 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
871 else:
876 else:
872 pass
877 pass
873
878
874 def lookup(self, key):
879 def lookup(self, key):
875 return self[key].node()
880 return self[key].node()
876
881
877 def lookupbranch(self, key, remote=None):
882 def lookupbranch(self, key, remote=None):
878 repo = remote or self
883 repo = remote or self
879 if key in repo.branchmap():
884 if key in repo.branchmap():
880 return key
885 return key
881
886
882 repo = (remote and remote.local()) and remote or self
887 repo = (remote and remote.local()) and remote or self
883 return repo[key].branch()
888 return repo[key].branch()
884
889
885 def known(self, nodes):
890 def known(self, nodes):
886 cl = self.changelog
891 cl = self.changelog
887 nm = cl.nodemap
892 nm = cl.nodemap
888 filtered = cl.filteredrevs
893 filtered = cl.filteredrevs
889 result = []
894 result = []
890 for n in nodes:
895 for n in nodes:
891 r = nm.get(n)
896 r = nm.get(n)
892 resp = not (r is None or r in filtered)
897 resp = not (r is None or r in filtered)
893 result.append(resp)
898 result.append(resp)
894 return result
899 return result
895
900
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records: the dirstate cannot do it itself, as it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

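    # Illustrative configuration consumed by _loadfilter()/wread()/wwrite()
    # above (the patterns and external commands are hypothetical):
    #
    #   [encode]
    #   *.txt = dos2unix   # applied by wread(): working directory -> store
    #   [decode]
    #   *.txt = unix2dos   # applied by wwrite(): store -> working directory
    #
    # A command of '!' disables a matching pattern, and a name registered
    # through adddatafilter() dispatches to that function instead of an
    # external shell command.
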
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic references between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # This needs to be invoked explicitly here, because in-memory
                # changes are not written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running.
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

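    # Usage sketch (assumes the caller already holds the store lock, as the
    # devel check above enforces):
    #
    #   with repo.lock():
    #       tr = repo.transaction('my-operation')
    #       try:
    #           ...            # write store data through tr
    #           tr.close()     # runs pretxnclose via validate(), finalizers
    #       finally:
    #           tr.release()   # aborts and fires txnabort if never closed
    #
    # A nested call simply returns tr.nest(), so only the outermost
    # close/release pair actually commits or rolls back the journal.
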
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

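    # Illustrative on-disk detail: once a transaction closes, aftertrans()
    # renames each journal file above to its undoname() counterpart
    # ('journal.dirstate' -> 'undo.dirstate', and so on). 'journal.desc'
    # stores the changelog length and description that _rollback() parses
    # later; committing in a repo of 42 revisions would write roughly:
    #
    #   42
    #   commit
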
    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting an already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

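    # Rough usage guide (sketch): invalidatedirstate() targets working
    # directory state, invalidate() targets the cached store/non-store
    # files, and invalidateall() below combines both:
    #
    #   repo.invalidate()                     # caches rechecked on next use
    #   repo.invalidate(clearfilecache=True)  # also drop filecache entries
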
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

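    # Usage sketch: work that must not run while locks are held (such as
    # the txnclose hook scheduled in transaction() above) is deferred like
    # this:
    #
    #   def notify():
    #       pass  # safe to run without wlock/lock held
    #   repo._afterlock(notify)  # runs immediately if nothing is locked
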
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

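    # Lock-ordering sketch following the docstrings above: take wlock
    # before lock, and release in reverse order:
    #
    #   with repo.wlock():
    #       with repo.lock():
    #           pass  # may touch both .hg/ and .hg/store
    #
    # Acquiring "lock" first and "wlock" second triggers the develwarn
    # above and risks deadlock against other processes.
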
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

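    # Illustrative outcome: committing a rename of foo -> bar against a
    # single parent makes _filecommit() store bar with nullid as its first
    # filelog parent plus copy metadata along the lines of:
    #
    #   meta = {'copy': 'foo', 'copyrev': hex(<node of foo in manifest1>)}
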
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

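    # Minimal caller sketch (hypothetical file name and message): roughly
    # what higher-level commands do with this API:
    #
    #   m = matchmod.match(repo.root, '', ['path/to/file'])
    #   node = repo.commit(text="fix a bug", user="alice", match=m)
    #   if node is None:
    #       pass  # nothing to commit and ui.allowemptycommit not set
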
1714 @unfilteredmethod
1719 @unfilteredmethod
1715 def commitctx(self, ctx, error=False):
1720 def commitctx(self, ctx, error=False):
1716 """Add a new revision to current repository.
1721 """Add a new revision to current repository.
1717 Revision information is passed via the context argument.
1722 Revision information is passed via the context argument.
1718 """
1723 """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # move the new commit into its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
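        # A sketch of the expected calling sequence (hypothetical driver
        # code; repair.strip() follows roughly this shape):
        #
        #   with repo.lock():
        #       repo.destroying()    # flush pending in-memory state
        #       ... truncate the revlogs ...
        #       repo.destroyed()     # repair caches, invalidate the repo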
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
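        # For example (hypothetical call): all heads of the 'default'
        # branch, including closed ones:
        #
        #   repo.branchheads('default', closed=True)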
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
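        # for each starting node, follow first parents through linear
        # (single-parent) history until a merge or the root is reached,
        # and report a (start, end, p1-of-end, p2-of-end) tuple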
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
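        # discovery helper: walk the first-parent chain from each 'top'
        # toward 'bottom', sampling the nodes found at exponentially
        # growing distances (1, 2, 4, 8, ...); returns one list of
        # samples per (top, bottom) pair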
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose registered functions are
        called with a pushop (exposing repo, remote and outgoing) before
        changesets are pushed.
        """
        return util.hooks()
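    # Registration sketch (hypothetical extension code; util.hooks.add()
    # takes a source name and a callable):
    #
    #   def _checkoutgoing(pushop):
    #       ... inspect pushop.repo / pushop.remote / pushop.outgoing ...
    #   repo.prepushoutgoinghooks.add('myext', _checkoutgoing)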

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret
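    # Example invocation (hypothetical values), as performed by the wire
    # protocol or by exchange when a bookmark is pushed:
    #
    #   repo.pushkey('bookmarks', 'mybook', oldhexnode, newhexnode)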

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
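        # stash the message in .hg/last-message.txt (self.vfs is rooted
        # at .hg/) so it can be recovered after a failed or aborted
        # commit; returns the path relative to the current directory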
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a
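# Transactions use aftertrans() to schedule journal -> undo renames once
# the transaction is closed; returning a plain closure (rather than a
# bound method) keeps the repository out of the transaction's reference
# cycle so destructors can still run.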

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
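# With an out-of-the-box configuration this typically yields (assuming
# current defaults, where generaldelta is enabled at init time):
#
#   set(['revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'])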