localrepo: use peer interfaces
Gregory Szorc
r33802:707750e5 default
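This changeset moves the local peer classes onto the formal peer interfaces: localpeer now derives from repository.peer instead of peer.peerrepository (and locallegacypeer from repository.legacypeer), with its methods regrouped under the _basepeer, _basewirecommands, and peer interface sections visible in the diff below. As a hedged sketch of what that buys a caller, the snippet below obtains a peer from a local repository and exercises a few of the interface methods named in this diff; it assumes a Mercurial 4.3-era tree on the import path and is illustrative rather than part of the changeset:

```python
from mercurial import hg, ui as uimod

ui = uimod.ui.load()
repo = hg.repository(ui, '.')    # a localrepository
peer = repo.peer()               # a localpeer, now a repository.peer

# _basepeer interface
assert peer.local() is not None  # local peers expose a repo object directly
assert peer.canpush()

# _basewirecommands interface
caps = peer.capabilities()       # public replacement for _capabilities()
heads = peer.heads()
```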
@@ -1,2265 +1,2294 @@
 # localrepo.py - read/write repository class for mercurial
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import hashlib
 import inspect
 import os
 import random
 import time
 import weakref

 from .i18n import _
 from .node import (
     hex,
     nullid,
     short,
 )
 from . import (
     bookmarks,
     branchmap,
     bundle2,
     changegroup,
     changelog,
     color,
     context,
     dirstate,
     dirstateguard,
     encoding,
     error,
     exchange,
     extensions,
     filelog,
     hook,
     lock as lockmod,
     manifest,
     match as matchmod,
     merge as mergemod,
     mergeutil,
     namespaces,
     obsolete,
     pathutil,
     peer,
     phases,
     pushkey,
     pycompat,
+    repository,
     repoview,
     revset,
     revsetlang,
     scmutil,
     sparse,
     store,
     subrepo,
     tags as tagsmod,
     transaction,
     txnutil,
     util,
     vfs as vfsmod,
 )

 release = lockmod.release
 urlerr = util.urlerr
 urlreq = util.urlreq

 # set of (path, vfs-location) tuples. vfs-location is:
 # - 'plain' for vfs relative paths
 # - '' for svfs relative paths
 _cachedfiles = set()

 class _basefilecache(scmutil.filecache):
     """All filecache usage on repo is done for logic that should be unfiltered
     """
     def __get__(self, repo, type=None):
         if repo is None:
             return self
         return super(_basefilecache, self).__get__(repo.unfiltered(), type)
     def __set__(self, repo, value):
         return super(_basefilecache, self).__set__(repo.unfiltered(), value)
     def __delete__(self, repo):
         return super(_basefilecache, self).__delete__(repo.unfiltered())

 class repofilecache(_basefilecache):
     """filecache for files in .hg but outside of .hg/store"""
     def __init__(self, *paths):
         super(repofilecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, 'plain'))

     def join(self, obj, fname):
         return obj.vfs.join(fname)

 class storecache(_basefilecache):
     """filecache for files in the store"""
     def __init__(self, *paths):
         super(storecache, self).__init__(*paths)
         for path in paths:
             _cachedfiles.add((path, ''))

     def join(self, obj, fname):
         return obj.sjoin(fname)

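The repofilecache and storecache decorators above are applied further down in this same file; the sketch below restates two of those later uses (the class name is hypothetical). A decorated method becomes a cached property that is recomputed only when the named files change on disk, courtesy of scmutil.filecache's stat tracking:

```python
class examplerepo(object):
    @repofilecache('bookmarks', 'bookmarks.current')  # paths under .hg/, via vfs
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @storecache('00changelog.i')                      # paths under .hg/store, via svfs
    def changelog(self):
        return changelog.changelog(self.svfs)
```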
 def isfilecached(repo, name):
     """check if a repo has already cached "name" filecache-ed property

     This returns a (cachedobj-or-None, iscached) tuple.
     """
     cacheentry = repo.unfiltered()._filecache.get(name, None)
     if not cacheentry:
         return None, False
     return cacheentry.obj, True

 class unfilteredpropertycache(util.propertycache):
     """propertycache that applies to unfiltered repos only"""

     def __get__(self, repo, type=None):
         unfi = repo.unfiltered()
         if unfi is repo:
             return super(unfilteredpropertycache, self).__get__(unfi)
         return getattr(unfi, self.name)

 class filteredpropertycache(util.propertycache):
     """propertycache that must take filtering into account"""

     def cachevalue(self, obj, value):
         object.__setattr__(obj, self.name, value)


 def hasunfilteredcache(repo, name):
     """check if a repo has an unfilteredpropertycache value for <name>"""
     return name in vars(repo.unfiltered())

 def unfilteredmethod(orig):
     """decorate a method that always needs to be run on an unfiltered version"""
     def wrapper(repo, *args, **kwargs):
         return orig(repo.unfiltered(), *args, **kwargs)
     return wrapper

 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
               'unbundle'}
 legacycaps = moderncaps.union({'changegroupsubset'})

-class localpeer(peer.peerrepository):
+class localpeer(repository.peer):
     '''peer for a local repo; reflects only the most recent API'''

     def __init__(self, repo, caps=None):
+        super(localpeer, self).__init__()
+
         if caps is None:
             caps = moderncaps.copy()
-        peer.peerrepository.__init__(self)
         self._repo = repo.filtered('served')
-        self.ui = repo.ui
+        self._ui = repo.ui
         self._caps = repo._restrictcapabilities(caps)

+    # Begin of _basepeer interface.
+
+    @util.propertycache
+    def ui(self):
+        return self._ui
+
+    def url(self):
+        return self._repo.url()
+
+    def local(self):
+        return self._repo
+
+    def peer(self):
+        return self
+
+    def canpush(self):
+        return True
+
     def close(self):
         self._repo.close()

-    def _capabilities(self):
-        return self._caps
-
-    def local(self):
-        return self._repo
-
-    def canpush(self):
-        return True
-
-    def url(self):
-        return self._repo.url()
-
-    def lookup(self, key):
-        return self._repo.lookup(key)
+    # End of _basepeer interface.
+
+    # Begin of _basewirecommands interface.

     def branchmap(self):
         return self._repo.branchmap()

-    def heads(self):
-        return self._repo.heads()
+    def capabilities(self):
+        return self._caps

-    def known(self, nodes):
-        return self._repo.known(nodes)
+    def debugwireargs(self, one, two, three=None, four=None, five=None):
+        """Used to test argument passing over the wire"""
+        return "%s %s %s %s %s" % (one, two, three, four, five)

     def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                   **kwargs):
         chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                           common=common, bundlecaps=bundlecaps,
                                           **kwargs)
         cb = util.chunkbuffer(chunks)

         if exchange.bundle2requested(bundlecaps):
             # When requesting a bundle2, getbundle returns a stream to make the
             # wire level function happier. We need to build a proper object
             # from it in local peer.
             return bundle2.getunbundler(self.ui, cb)
         else:
             return changegroup.getunbundler('01', cb, None)

-    # TODO We might want to move the next two calls into legacypeer and add
-    # unbundle instead.
+    def heads(self):
+        return self._repo.heads()
+
+    def known(self, nodes):
+        return self._repo.known(nodes)
+
+    def listkeys(self, namespace):
+        return self._repo.listkeys(namespace)
+
+    def lookup(self, key):
+        return self._repo.lookup(key)
+
+    def pushkey(self, namespace, key, old, new):
+        return self._repo.pushkey(namespace, key, old, new)
+
+    def stream_out(self):
+        raise error.Abort(_('cannot perform stream clone against local '
+                            'peer'))

     def unbundle(self, cg, heads, url):
         """apply a bundle on a repo

         This function handles the repo locking itself."""
         try:
             try:
                 cg = exchange.readbundle(self.ui, cg, None)
                 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                 if util.safehasattr(ret, 'getchunks'):
                     # This is a bundle20 object, turn it into an unbundler.
                     # This little dance should be dropped eventually when the
                     # API is finally improved.
                     stream = util.chunkbuffer(ret.getchunks())
                     ret = bundle2.getunbundler(self.ui, stream)
                 return ret
             except Exception as exc:
                 # If the exception contains output salvaged from a bundle2
                 # reply, we need to make sure it is printed before continuing
                 # to fail. So we build a bundle2 with such output and consume
                 # it directly.
                 #
                 # This is not very elegant but allows a "simple" solution for
                 # issue4594
                 output = getattr(exc, '_bundle2salvagedoutput', ())
                 if output:
                     bundler = bundle2.bundle20(self._repo.ui)
                     for out in output:
                         bundler.addpart(out)
                     stream = util.chunkbuffer(bundler.getchunks())
                     b = bundle2.getunbundler(self.ui, stream)
                     bundle2.processbundle(self._repo, b)
                 raise
         except error.PushRaced as exc:
             raise error.ResponseError(_('push failed:'), str(exc))

-    def pushkey(self, namespace, key, old, new):
-        return self._repo.pushkey(namespace, key, old, new)
-
-    def listkeys(self, namespace):
-        return self._repo.listkeys(namespace)
-
-    def debugwireargs(self, one, two, three=None, four=None, five=None):
-        '''used to test argument passing over the wire'''
-        return "%s %s %s %s %s" % (one, two, three, four, five)
+    # End of _basewirecommands interface.
+
+    # Begin of peer interface.
+
+    def iterbatch(self):
+        return peer.localiterbatcher(self)
+
+    # End of peer interface.

-class locallegacypeer(localpeer):
+class locallegacypeer(repository.legacypeer, localpeer):
     '''peer extension which implements legacy methods too; used for tests with
     restricted capabilities'''

     def __init__(self, repo):
-        localpeer.__init__(self, repo, caps=legacycaps)
+        super(locallegacypeer, self).__init__(repo, caps=legacycaps)
+
+    # Begin of baselegacywirecommands interface.
+
+    def between(self, pairs):
+        return self._repo.between(pairs)

     def branches(self, nodes):
         return self._repo.branches(nodes)

-    def between(self, pairs):
-        return self._repo.between(pairs)
-
     def changegroup(self, basenodes, source):
         return changegroup.changegroup(self._repo, basenodes, source)

     def changegroupsubset(self, bases, heads, source):
         return changegroup.changegroupsubset(self._repo, bases, heads, source)
+
+    # End of baselegacywirecommands interface.

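The iterbatch() method added above returns a peer.localiterbatcher. A hedged sketch of the batching pattern it serves, modeled on how Mercurial's discovery code drives batchable peers in this era (exact call sites may differ):

```python
batch = peer.iterbatch()
batch.heads()             # calls are queued, not executed immediately
batch.known(nodes)        # 'nodes' is a hypothetical list of node ids
batch.submit()            # run everything in one round trip
heads, known = batch.results()
```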
 # Increment the sub-version when the revlog v2 format changes to lock out old
 # clients.
 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

 class localrepository(object):

     supportedformats = {
         'revlogv1',
         'generaldelta',
         'treemanifest',
         'manifestv2',
         REVLOGV2_REQUIREMENT,
     }
     _basesupported = supportedformats | {
         'store',
         'fncache',
         'shared',
         'relshared',
         'dotencode',
         'exp-sparse',
     }
     openerreqs = {
         'revlogv1',
         'generaldelta',
         'treemanifest',
         'manifestv2',
     }

     # a list of (ui, featureset) functions.
     # only functions defined in modules of enabled extensions are invoked
     featuresetupfuncs = set()

     # list of prefixes for files which can be written without 'wlock'
     # Extensions should extend this list when needed
     _wlockfreeprefix = {
         # We might consider requiring 'wlock' for the next
         # two, but pretty much all the existing code assumes
         # wlock is not needed so we keep them excluded for
         # now.
         'hgrc',
         'requires',
         # XXX cache is a complicated business; someone
         # should investigate this in depth at some point
         'cache/',
         # XXX shouldn't the dirstate be covered by the wlock?
         'dirstate',
         # XXX bisect was still a bit too messy at the time
         # this changeset was introduced. Someone should fix
         # the remaining bit and drop this line
         'bisect.state',
     }

     def __init__(self, baseui, path, create=False):
         self.requirements = set()
         self.filtername = None
         # wvfs: rooted at the repository root, used to access the working copy
         self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
         # vfs: rooted at .hg, used to access repo files outside of .hg/store
         self.vfs = None
         # svfs: usually rooted at .hg/store, used to access repository history
         # If this is a shared repository, this vfs may point to another
         # repository's .hg/store directory.
         self.svfs = None
         self.root = self.wvfs.base
         self.path = self.wvfs.join(".hg")
         self.origroot = path
         # These auditors are not used by the vfs,
         # only used when writing this comment: basectx.match
         self.auditor = pathutil.pathauditor(self.root, self._checknested)
         self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                 realfs=False, cached=True)
         self.baseui = baseui
         self.ui = baseui.copy()
         self.ui.copy = baseui.copy # prevent copying repo configuration
         self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
         if (self.ui.configbool('devel', 'all-warnings') or
             self.ui.configbool('devel', 'check-locks')):
             self.vfs.audit = self._getvfsward(self.vfs.audit)
         # A list of callbacks to shape the phase if no data were found.
         # Callbacks are in the form: func(repo, roots) --> processed root.
         # This list is to be filled by extensions during repo setup
         self._phasedefaults = []
         try:
             self.ui.readconfig(self.vfs.join("hgrc"), self.root)
             self._loadextensions()
         except IOError:
             pass

         if self.featuresetupfuncs:
             self.supported = set(self._basesupported) # use private copy
             extmods = set(m.__name__ for n, m
                           in extensions.extensions(self.ui))
             for setupfunc in self.featuresetupfuncs:
                 if setupfunc.__module__ in extmods:
                     setupfunc(self.ui, self.supported)
         else:
             self.supported = self._basesupported
         color.setup(self.ui)

         # Add compression engines.
         for name in util.compengines:
             engine = util.compengines[name]
             if engine.revlogheader():
                 self.supported.add('exp-compression-%s' % name)

         if not self.vfs.isdir():
             if create:
                 self.requirements = newreporequirements(self)

                 if not self.wvfs.exists():
                     self.wvfs.makedirs()
                 self.vfs.makedir(notindexed=True)

                 if 'store' in self.requirements:
                     self.vfs.mkdir("store")

                 # create an invalid changelog
                 self.vfs.append(
                     "00changelog.i",
                     '\0\0\0\2' # represents revlogv2
                     ' dummy changelog to prevent using the old repo layout'
                 )
             else:
                 raise error.RepoError(_("repository %s not found") % path)
         elif create:
             raise error.RepoError(_("repository %s already exists") % path)
         else:
             try:
                 self.requirements = scmutil.readrequires(
                     self.vfs, self.supported)
             except IOError as inst:
                 if inst.errno != errno.ENOENT:
                     raise

         cachepath = self.vfs.join('cache')
         self.sharedpath = self.path
         try:
             sharedpath = self.vfs.read("sharedpath").rstrip('\n')
             if 'relshared' in self.requirements:
                 sharedpath = self.vfs.join(sharedpath)
             vfs = vfsmod.vfs(sharedpath, realpath=True)
             cachepath = vfs.join('cache')
             s = vfs.base
             if not vfs.exists():
                 raise error.RepoError(
                     _('.hg/sharedpath points to nonexistent directory %s') % s)
             self.sharedpath = s
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise

         if 'exp-sparse' in self.requirements and not sparse.enabled:
             raise error.RepoError(_('repository is using sparse feature but '
                                     'sparse is not enabled; enable the '
                                     '"sparse" extensions to access'))

         self.store = store.store(
             self.requirements, self.sharedpath,
             lambda base: vfsmod.vfs(base, cacheaudited=True))
         self.spath = self.store.path
         self.svfs = self.store.vfs
         self.sjoin = self.store.join
         self.vfs.createmode = self.store.createmode
         self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
         self.cachevfs.createmode = self.store.createmode
         if (self.ui.configbool('devel', 'all-warnings') or
             self.ui.configbool('devel', 'check-locks')):
             if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
             else: # standard vfs
                 self.svfs.audit = self._getsvfsward(self.svfs.audit)
         self._applyopenerreqs()
         if create:
             self._writerequirements()

         self._dirstatevalidatewarned = False

         self._branchcaches = {}
         self._revbranchcache = None
         self.filterpats = {}
         self._datafilters = {}
         self._transref = self._lockref = self._wlockref = None

         # A cache for various files under .hg/ that tracks file changes,
         # (used by the filecache decorator)
         #
         # Maps a property name to its util.filecacheentry
         self._filecache = {}

         # hold sets of revisions to be filtered
         # should be cleared when something might have changed the filter value:
         # - new changesets,
         # - phase change,
         # - new obsolescence marker,
         # - working directory parent change,
         # - bookmark changes
         self.filteredrevcache = {}

         # post-dirstate-status hooks
         self._postdsstatus = []

         # Cache of types representing filtered repos.
         self._filteredrepotypes = weakref.WeakKeyDictionary()

         # generic mapping between names and nodes
         self.names = namespaces.namespaces()

         # Key to signature value.
         self._sparsesignaturecache = {}
         # Signature to cached matcher instance.
         self._sparsematchercache = {}

     def _getvfsward(self, origfunc):
         """build a ward for self.vfs"""
         rref = weakref.ref(self)
         def checkvfs(path, mode=None):
             ret = origfunc(path, mode=mode)
             repo = rref()
             if (repo is None
                 or not util.safehasattr(repo, '_wlockref')
                 or not util.safehasattr(repo, '_lockref')):
                 return
             if mode in (None, 'r', 'rb'):
                 return
             if path.startswith(repo.path):
                 # truncate name relative to the repository (.hg)
                 path = path[len(repo.path) + 1:]
             if path.startswith('cache/'):
                 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
             if path.startswith('journal.'):
                 # journal is covered by 'lock'
                 if repo._currentlock(repo._lockref) is None:
                     repo.ui.develwarn('write with no lock: "%s"' % path,
                                       stacklevel=2, config='check-locks')
             elif repo._currentlock(repo._wlockref) is None:
                 # rest of vfs files are covered by 'wlock'
                 #
                 # exclude special files
                 for prefix in self._wlockfreeprefix:
                     if path.startswith(prefix):
                         return
                 repo.ui.develwarn('write with no wlock: "%s"' % path,
                                   stacklevel=2, config='check-locks')
             return ret
         return checkvfs

     def _getsvfsward(self, origfunc):
         """build a ward for self.svfs"""
         rref = weakref.ref(self)
         def checksvfs(path, mode=None):
             ret = origfunc(path, mode=mode)
             repo = rref()
             if repo is None or not util.safehasattr(repo, '_lockref'):
                 return
             if mode in (None, 'r', 'rb'):
                 return
             if path.startswith(repo.sharedpath):
                 # truncate name relative to the repository (.hg)
                 path = path[len(repo.sharedpath) + 1:]
             if repo._currentlock(repo._lockref) is None:
                 repo.ui.develwarn('write with no lock: "%s"' % path,
                                   stacklevel=3)
             return ret
         return checksvfs

     def close(self):
         self._writecaches()

     def _loadextensions(self):
         extensions.loadall(self.ui)

     def _writecaches(self):
         if self._revbranchcache:
             self._revbranchcache.write()

     def _restrictcapabilities(self, caps):
         if self.ui.configbool('experimental', 'bundle2-advertise'):
             caps = set(caps)
             capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
             caps.add('bundle2=' + urlreq.quote(capsblob))
         return caps

     def _applyopenerreqs(self):
         self.svfs.options = dict((r, 1) for r in self.requirements
                                  if r in self.openerreqs)
         # experimental config: format.chunkcachesize
         chunkcachesize = self.ui.configint('format', 'chunkcachesize')
         if chunkcachesize is not None:
             self.svfs.options['chunkcachesize'] = chunkcachesize
         # experimental config: format.maxchainlen
         maxchainlen = self.ui.configint('format', 'maxchainlen')
         if maxchainlen is not None:
             self.svfs.options['maxchainlen'] = maxchainlen
         # experimental config: format.manifestcachesize
         manifestcachesize = self.ui.configint('format', 'manifestcachesize')
         if manifestcachesize is not None:
             self.svfs.options['manifestcachesize'] = manifestcachesize
         # experimental config: format.aggressivemergedeltas
         aggressivemergedeltas = self.ui.configbool('format',
                                                    'aggressivemergedeltas')
         self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
         self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
         chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan', -1)
         if 0 <= chainspan:
             self.svfs.options['maxdeltachainspan'] = chainspan

         for r in self.requirements:
             if r.startswith('exp-compression-'):
                 self.svfs.options['compengine'] = r[len('exp-compression-'):]

         # TODO move "revlogv2" to openerreqs once finalized.
         if REVLOGV2_REQUIREMENT in self.requirements:
             self.svfs.options['revlogv2'] = True

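For reference, a sketch of an hgrc exercising the knobs read by _applyopenerreqs above. The section and option names come straight from the configint/configbool/configbytes calls; the values are purely illustrative and most of these options are experimental:

```ini
[format]
chunkcachesize = 65536
maxchainlen = 1000
manifestcachesize = 4
aggressivemergedeltas = yes

[experimental]
maxdeltachainspan = 4194304
```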
     def _writerequirements(self):
         scmutil.writerequires(self.vfs, self.requirements)

     def _checknested(self, path):
         """Determine if path is a legal nested repository."""
         if not path.startswith(self.root):
             return False
         subpath = path[len(self.root) + 1:]
         normsubpath = util.pconvert(subpath)

         # XXX: Checking against the current working copy is wrong in
         # the sense that it can reject things like
         #
         #   $ hg cat -r 10 sub/x.txt
         #
         # if sub/ is no longer a subrepository in the working copy
         # parent revision.
         #
         # However, it can of course also allow things that would have
         # been rejected before, such as the above cat command if sub/
         # is a subrepository now, but was a normal directory before.
         # The old path auditor would have rejected by mistake since it
         # panics when it sees sub/.hg/.
         #
         # All in all, checking against the working copy seems sensible
         # since we want to prevent access to nested repositories on
         # the filesystem *now*.
         ctx = self[None]
         parts = util.splitpath(subpath)
         while parts:
             prefix = '/'.join(parts)
             if prefix in ctx.substate:
                 if prefix == normsubpath:
                     return True
                 else:
                     sub = ctx.sub(prefix)
                     return sub.checknested(subpath[len(prefix) + 1:])
             else:
                 parts.pop()
         return False

     def peer(self):
         return localpeer(self) # not cached to avoid reference cycle

     def unfiltered(self):
         """Return unfiltered version of the repository

         Intended to be overwritten by filtered repo."""
         return self

     def filtered(self, name):
         """Return a filtered version of a repository"""
         # Python <3.4 easily leaks types via __mro__. See
         # https://bugs.python.org/issue17950. We cache dynamically
         # created types so this method doesn't leak on every
         # invocation.

         key = self.unfiltered().__class__
         if key not in self._filteredrepotypes:
             # Build a new type with the repoview mixin and the base
             # class of this repo. Give it a name containing the
             # filter name to aid debugging.
             bases = (repoview.repoview, key)
             cls = type(r'%sfilteredrepo' % name, bases, {})
             self._filteredrepotypes[key] = cls

         return self._filteredrepotypes[key](self, name)

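A brief illustrative sketch of the filtering API above; the filter name 'served' comes from the repoview module (it is the same view localpeer wraps earlier in this file):

```python
served = repo.filtered('served')  # view that hides e.g. secret changesets
assert served.unfiltered() is repo.unfiltered()

# The dynamically created repoview class is cached per base class, so
# repeated calls reuse one type instead of leaking a new one each time:
assert type(repo.filtered('served')) is type(repo.filtered('served'))
```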
     @repofilecache('bookmarks', 'bookmarks.current')
     def _bookmarks(self):
         return bookmarks.bmstore(self)

     @property
     def _activebookmark(self):
         return self._bookmarks.active

     # _phaserevs and _phasesets depend on changelog. What we need is to
     # call _phasecache.invalidate() if '00changelog.i' was changed, but it
     # can't be easily expressed in the filecache mechanism.
     @storecache('phaseroots', '00changelog.i')
     def _phasecache(self):
         return phases.phasecache(self, self._phasedefaults)

     @storecache('obsstore')
     def obsstore(self):
         return obsolete.makestore(self.ui, self)

     @storecache('00changelog.i')
     def changelog(self):
         return changelog.changelog(self.svfs,
                                    trypending=txnutil.mayhavepending(self.root))

     def _constructmanifest(self):
         # This is a temporary function while we migrate from manifest to
         # manifestlog. It allows bundlerepo and unionrepo to intercept the
         # manifest creation.
         return manifest.manifestrevlog(self.svfs)

     @storecache('00manifest.i')
     def manifestlog(self):
         return manifest.manifestlog(self.svfs, self)

     @repofilecache('dirstate')
     def dirstate(self):
         sparsematchfn = lambda: sparse.matcher(self)

         return dirstate.dirstate(self.vfs, self.ui, self.root,
                                  self._dirstatevalidate, sparsematchfn)

     def _dirstatevalidate(self, node):
         try:
             self.changelog.rev(node)
             return node
         except error.LookupError:
             if not self._dirstatevalidatewarned:
                 self._dirstatevalidatewarned = True
                 self.ui.warn(_("warning: ignoring unknown"
                                " working parent %s!\n") % short(node))
             return nullid

     def __getitem__(self, changeid):
         if changeid is None:
             return context.workingctx(self)
         if isinstance(changeid, slice):
             # wdirrev isn't contiguous so the slice shouldn't include it
             return [context.changectx(self, i)
                     for i in xrange(*changeid.indices(len(self)))
                     if i not in self.changelog.filteredrevs]
         try:
             return context.changectx(self, changeid)
         except error.WdirUnsupported:
             return context.workingctx(self)

     def __contains__(self, changeid):
         """True if the given changeid exists

         error.LookupError is raised if an ambiguous node is specified.
         """
         try:
             self[changeid]
             return True
         except error.RepoLookupError:
             return False

     def __nonzero__(self):
         return True

     __bool__ = __nonzero__

     def __len__(self):
         return len(self.changelog)

     def __iter__(self):
         return iter(self.changelog)

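The dunder methods above give localrepository its container behavior; a short illustrative sketch:

```python
len(repo)                      # number of revisions in the changelog
ctx = repo['tip']              # __getitem__ returns a changectx
wctx = repo[None]              # None means the working directory context
exists = 'deadbeef' in repo    # __contains__ swallows RepoLookupError
for rev in repo:               # __iter__ yields integer revisions
    pass
```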
     def revs(self, expr, *args):
         '''Find revisions matching a revset.

         The revset is specified as a string ``expr`` that may contain
         %-formatting to escape certain types. See ``revsetlang.formatspec``.

         Revset aliases from the configuration are not expanded. To expand
         user aliases, consider calling ``scmutil.revrange()`` or
         ``repo.anyrevs([expr], user=True)``.

         Returns a revset.abstractsmartset, which is a list-like interface
         that contains integer revisions.
         '''
         expr = revsetlang.formatspec(expr, *args)
         m = revset.match(None, expr)
         return m(self)

     def set(self, expr, *args):
         '''Find revisions matching a revset and emit changectx instances.

         This is a convenience wrapper around ``revs()`` that iterates the
         result and is a generator of changectx instances.

         Revset aliases from the configuration are not expanded. To expand
         user aliases, consider calling ``scmutil.revrange()``.
         '''
         for r in self.revs(expr, *args):
             yield self[r]

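A hedged usage sketch for revs() and set() above; 'rev' is a hypothetical revision number, and the %-escapes are those documented for revsetlang.formatspec:

```python
for r in repo.revs('ancestors(%d) and not public()', rev):
    print(r)            # integer revision numbers from a smartset

for ctx in repo.set('heads(%d::)', rev):
    print(ctx.hex())    # changectx instances
```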
     def anyrevs(self, specs, user=False, localalias=None):
         '''Find revisions matching one of the given revsets.

         Revset aliases from the configuration are not expanded by default. To
         expand user aliases, specify ``user=True``. To provide some local
         definitions overriding user aliases, set ``localalias`` to
         ``{name: definitionstring}``.
         '''
         if user:
             m = revset.matchany(self.ui, specs, repo=self,
                                 localalias=localalias)
         else:
             m = revset.matchany(None, specs, localalias=localalias)
         return m(self)

     def url(self):
         return 'file:' + self.root

     def hook(self, name, throw=False, **args):
         """Call a hook, passing this repo instance.

         This is a convenience method to aid invoking hooks. Extensions likely
         won't call this unless they have registered a custom hook or are
         replacing code that is expected to call a hook.
         """
         return hook.hook(self.ui, self, name, throw, **args)

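A one-line sketch of hook() above, using the standard pretxncommit hook name; node and p1 are hypothetical binary node ids (hex() is imported at the top of this file), and the keyword arguments surface to shell hooks as HG_* environment variables:

```python
repo.hook('pretxncommit', throw=True, node=hex(node), parent1=hex(p1))
```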
793 @filteredpropertycache
822 @filteredpropertycache
794 def _tagscache(self):
823 def _tagscache(self):
795 '''Returns a tagscache object that contains various tags related
824 '''Returns a tagscache object that contains various tags related
796 caches.'''
825 caches.'''
797
826
798 # This simplifies its cache management by having one decorated
827 # This simplifies its cache management by having one decorated
799 # function (this one) and the rest simply fetch things from it.
828 # function (this one) and the rest simply fetch things from it.
800 class tagscache(object):
829 class tagscache(object):
801 def __init__(self):
830 def __init__(self):
802 # These two define the set of tags for this repository. tags
831 # These two define the set of tags for this repository. tags
803 # maps tag name to node; tagtypes maps tag name to 'global' or
832 # maps tag name to node; tagtypes maps tag name to 'global' or
804 # 'local'. (Global tags are defined by .hgtags across all
833 # 'local'. (Global tags are defined by .hgtags across all
805 # heads, and local tags are defined in .hg/localtags.)
834 # heads, and local tags are defined in .hg/localtags.)
806 # They constitute the in-memory cache of tags.
835 # They constitute the in-memory cache of tags.
807 self.tags = self.tagtypes = None
836 self.tags = self.tagtypes = None
808
837
809 self.nodetagscache = self.tagslist = None
838 self.nodetagscache = self.tagslist = None
810
839
811 cache = tagscache()
840 cache = tagscache()
812 cache.tags, cache.tagtypes = self._findtags()
841 cache.tags, cache.tagtypes = self._findtags()
813
842
814 return cache
843 return cache
815
844
816 def tags(self):
845 def tags(self):
817 '''return a mapping of tag to node'''
846 '''return a mapping of tag to node'''
818 t = {}
847 t = {}
819 if self.changelog.filteredrevs:
848 if self.changelog.filteredrevs:
820 tags, tt = self._findtags()
849 tags, tt = self._findtags()
821 else:
850 else:
822 tags = self._tagscache.tags
851 tags = self._tagscache.tags
823 for k, v in tags.iteritems():
852 for k, v in tags.iteritems():
824 try:
853 try:
825 # ignore tags to unknown nodes
854 # ignore tags to unknown nodes
826 self.changelog.rev(v)
855 self.changelog.rev(v)
827 t[k] = v
856 t[k] = v
828 except (error.LookupError, ValueError):
857 except (error.LookupError, ValueError):
829 pass
858 pass
830 return t
859 return t
831
860
832 def _findtags(self):
861 def _findtags(self):
833 '''Do the hard work of finding tags. Return a pair of dicts
862 '''Do the hard work of finding tags. Return a pair of dicts
834 (tags, tagtypes) where tags maps tag name to node, and tagtypes
863 (tags, tagtypes) where tags maps tag name to node, and tagtypes
835 maps tag name to a string like \'global\' or \'local\'.
864 maps tag name to a string like \'global\' or \'local\'.
836 Subclasses or extensions are free to add their own tags, but
865 Subclasses or extensions are free to add their own tags, but
837 should be aware that the returned dicts will be retained for the
866 should be aware that the returned dicts will be retained for the
838 duration of the localrepo object.'''
867 duration of the localrepo object.'''
839
868
840 # XXX what tagtype should subclasses/extensions use? Currently
869 # XXX what tagtype should subclasses/extensions use? Currently
841 # mq and bookmarks add tags, but do not set the tagtype at all.
870 # mq and bookmarks add tags, but do not set the tagtype at all.
842 # Should each extension invent its own tag type? Should there
871 # Should each extension invent its own tag type? Should there
843 # be one tagtype for all such "virtual" tags? Or is the status
872 # be one tagtype for all such "virtual" tags? Or is the status
844 # quo fine?
873 # quo fine?
845
874
846
875
847 # map tag name to (node, hist)
876 # map tag name to (node, hist)
848 alltags = tagsmod.findglobaltags(self.ui, self)
877 alltags = tagsmod.findglobaltags(self.ui, self)
849 # map tag name to tag type
878 # map tag name to tag type
850 tagtypes = dict((tag, 'global') for tag in alltags)
879 tagtypes = dict((tag, 'global') for tag in alltags)
851
880
852 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
881 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
853
882
854 # Build the return dicts. Have to re-encode tag names because
883 # Build the return dicts. Have to re-encode tag names because
855 # the tags module always uses UTF-8 (in order not to lose info
884 # the tags module always uses UTF-8 (in order not to lose info
856 # writing to the cache), but the rest of Mercurial wants them in
885 # writing to the cache), but the rest of Mercurial wants them in
857 # local encoding.
886 # local encoding.
858 tags = {}
887 tags = {}
859 for (name, (node, hist)) in alltags.iteritems():
888 for (name, (node, hist)) in alltags.iteritems():
860 if node != nullid:
889 if node != nullid:
861 tags[encoding.tolocal(name)] = node
890 tags[encoding.tolocal(name)] = node
862 tags['tip'] = self.changelog.tip()
891 tags['tip'] = self.changelog.tip()
863 tagtypes = dict([(encoding.tolocal(name), value)
892 tagtypes = dict([(encoding.tolocal(name), value)
864 for (name, value) in tagtypes.iteritems()])
893 for (name, value) in tagtypes.iteritems()])
865 return (tags, tagtypes)
894 return (tags, tagtypes)
866
895
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

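    # Minimal usage sketch (hypothetical caller code), illustrating the two
    # lookup behaviors described in the branchtip docstring above:
    #
    #   node = repo.branchtip('default')   # raises RepoLookupError if unknown
    #   node = repo.branchtip('maybe', ignoremissing=True)   # returns None
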
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

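    # Contract sketch for known() (hypothetical values): given a list of
    # binary nodes, it returns a parallel list of booleans, False for nodes
    # that are absent from the changelog or hidden by filtering. One would
    # expect something like:
    #
    #   repo.known([repo['tip'].node(), '\x01' * 20])   # -> [True, False]
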
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records: the dirstate cannot do it itself; it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

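    # Illustrative hgrc configuration consumed by _loadfilter via the
    # '[encode]' and '[decode]' sections; the patterns and commands below are
    # made-up examples ('!' disables a filter for a pattern, and the
    # 'tempfile:' prefix with INFILE/OUTFILE placeholders is assumed to be
    # the built-in datafilter convention handled by util.filter):
    #
    #   [encode]
    #   *.txt = tempfile: unix2dos -n INFILE OUTFILE
    #   **.jpg = !
    #
    #   [decode]
    #   *.txt = dos2unix
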
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

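    # Usage sketch (hypothetical calls): flags mirror manifest flags, where
    # 'l' writes a symlink whose target is ``data``, 'x' marks the file
    # executable, and an empty string means a regular file:
    #
    #   repo.wwrite('hello.txt', 'hello\n', '')
    #   repo.wwrite('script.sh', '#!/bin/sh\n', 'x')
    #   repo.wwrite('link', 'hello.txt', 'l')
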
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            scmutil.registersummarycallback(self, tr, desc)
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (e.g. phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by default.
        # The flag will be removed when we are happy with the performance
        # impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
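        #
        # Hypothetical example: after a transaction that moves tag 'v1.2' and
        # adds 'v1.3', ``.hg/changes/tags.changes`` could read (nodes below
        # are made up):
        #
        #   -M 0123456789abcdef0123456789abcdef01234567 v1.2
        #   +M 89abcdef0123456789abcdef0123456789abcdef v1.2
        #   +A fedcba9876543210fedcba9876543210fedcba98 v1.3
        #
        # and a shell hook could guard on the variable, along the lines of:
        #
        #   [hooks]
        #   txnclose.tags = test -z "$HG_TAG_MOVED" || echo tags changed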
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles)
        tr.changes['revs'] = set()
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As the fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

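    # Minimal usage sketch (hypothetical caller; the canonical pattern is
    # close()/release() while holding the store lock, since transaction()
    # requires locking):
    #
    #   lock = tr = None
    #   try:
    #       lock = repo.lock()
    #       tr = repo.transaction('example')
    #       # ... mutate the store here ...
    #       tr.close()        # commit the transaction
    #   finally:
    #       if tr is not None:
    #           tr.release()  # rolls back unless close() was called
    #       if lock is not None:
    #           lock.release()
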
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during a transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

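    # Hypothetical sketch of an extension augmenting the cache updater, as the
    # docstring above suggests; all names below are illustrative and
    # extensions.wrapfunction is assumed to be available:
    #
    #   from mercurial import extensions, localrepo
    #
    #   def _buildcacheupdater(orig, self, newtransaction):
    #       origupdater = orig(self, newtransaction)
    #       def updater(tr):
    #           origupdater(tr)
    #           # ... warm extension-specific caches here ...
    #       return updater
    #
    #   extensions.wrapfunction(localrepo.localrepository,
    #                           '_buildcacheupdater', _buildcacheupdater)
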
    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction close, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operations to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

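    # Usage sketch (hypothetical): defer work until every lock is dropped,
    # e.g. firing a notification only once the repository is fully unlocked;
    # per the implementation above, the callback runs immediately when no
    # lock is currently held:
    #
    #   def notify():
    #       repo.ui.status('all locks released\n')
    #   repo._afterlock(notify)
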
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

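    # Ordering sketch (hypothetical caller), mirroring rollback() above:
    # always take wlock before lock, and release in the reverse order:
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       # ... modify working copy and store ...
    #   finally:
    #       release(lock, wlock)
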
1604 def _currentlock(self, lockref):
1633 def _currentlock(self, lockref):
1605 """Returns the lock if it's held, or None if it's not."""
1634 """Returns the lock if it's held, or None if it's not."""
1606 if lockref is None:
1635 if lockref is None:
1607 return None
1636 return None
1608 l = lockref()
1637 l = lockref()
1609 if l is None or not l.held:
1638 if l is None or not l.held:
1610 return None
1639 return None
1611 return l
1640 return l
1612
1641
1613 def currentwlock(self):
1642 def currentwlock(self):
1614 """Returns the wlock if it's held, or None if it's not."""
1643 """Returns the wlock if it's held, or None if it's not."""
1615 return self._currentlock(self._wlockref)
1644 return self._currentlock(self._wlockref)
1616
1645
1617 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1646 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1618 """
1647 """
1619 commit an individual file as part of a larger transaction
1648 commit an individual file as part of a larger transaction
1620 """
1649 """
1621
1650
1622 fname = fctx.path()
1651 fname = fctx.path()
1623 fparent1 = manifest1.get(fname, nullid)
1652 fparent1 = manifest1.get(fname, nullid)
1624 fparent2 = manifest2.get(fname, nullid)
1653 fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

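    # Illustrative note (editor's sketch, not part of the original module):
    # when _filecommit records a rename, the filelog revision it writes
    # carries copy metadata roughly of the form
    #
    #   meta = {'copy': 'foo', 'copyrev': '<40-hex node of foo's revision>'}
    #
    # and fparent1 is forced to nullid so that readers know to consult the
    # copy source instead of a literal first parent.
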
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

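    # Hypothetical example: with a modified file x/a and a clean directory
    # docs/, "hg commit docs" reaches checkcommitpatterns with docs in vdirs
    # but no matched file under docs/, so fail() aborts the commit with
    # "no match under directory!" rather than creating an empty changeset.
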
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

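    # Usage sketch (hypothetical caller, not part of the module): an
    # extension holding a repo object could create a commit with
    #
    #   node = repo.commit(text='fix parser bug',
    #                      user='alice <alice@example.org>')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
    #
    # commit() returns None when there is nothing to commit and
    # ui.allowemptycommit is not set.
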
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

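    # Sketch of a commit without a working directory (assumes the memctx API
    # from the context module as of this release; illustrative only):
    #
    #   def getfilectx(repo, memctx, path):
    #       return context.memfilectx(repo, path, 'new contents\n')
    #
    #   mctx = context.memctx(repo, (p1node, p2node), 'commit message',
    #                         ['a.txt'], getfilectx, user='bot')
    #   node = repo.commitctx(mctx)
    #
    # commitctx handles the manifest, changelog, and phase bookkeeping shown
    # above.
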
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

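    # Lifecycle sketch (illustrative): history-rewriting callers are expected
    # to bracket their work as
    #
    #   repo.destroying()
    #   # ... strip or rewrite changesets ...
    #   repo.destroyed()
    #
    # so pending state is flushed before nodes vanish and caches (phases,
    # tags, branchmap) are rebuilt afterwards.
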
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

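    # Hypothetical registration, per the protocol described above: log the
    # number of modified files whenever status fixups run.
    #
    #   def notify(wctx, status):
    #       wctx.repo().ui.debug('%d files modified\n' % len(status.modified))
    #   repo.addpostdsstatus(notify)
    #
    # Because the list is emptied after every status run, a callback must be
    # re-registered before each dirstate.status call.
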
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

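    # For instance (illustrative): repo.branchheads('default', closed=True)
    # yields the newest-first head nodes of the 'default' branch including
    # closed heads, whereas repo.heads() spans every branch.
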
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

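    # between() samples each first-parent chain at exponentially spaced
    # distances: a node is kept when the step counter i equals f, and f then
    # doubles, so the ancestors recorded for a (top, bottom) pair sit at
    # distances 1, 2, 4, 8, ... from top. This keeps the answer to the
    # legacy discovery protocol's 'between' query logarithmic in the length
    # of the chain.
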
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote,
        outgoing methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

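    # Example (illustrative): the 'bookmarks' pushkey namespace maps bookmark
    # names to hex nodes, so moving a bookmark over the pushkey protocol
    # might look like
    #
    #   old = repo.listkeys('bookmarks').get('feature', '')
    #   ok = repo.pushkey('bookmarks', 'feature', old, hex(newnode))
    #
    # where pushkey() returns False if a prepushkey hook aborted the update.
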
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

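# Illustrative use (editor's note): the transaction machinery passes rename
# pairs such as (vfs, 'journal', 'undo') through aftertrans(), so that when a
# transaction closes, .hg/journal becomes .hg/undo -- the file consulted by
# 'hg rollback'. undoname() below maps such journal paths to their undo
# counterparts.
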
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2'):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
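
# For reference (assuming stock configuration, where usestore, usefncache,
# dotencode, and generaldelta all default to on), a new repository typically
# gets
#
#   {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}
#
# which is persisted to .hg/requires so that clients lacking a listed
# capability refuse to operate on the repository.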