localrepo: specify optional callback parameter to pathauditor as a keyword
Augie Fackler
r35118:ebabc4a8 default
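The change itself is small: the two pathauditor call sites in localrepo.__init__ now pass the optional callback argument by keyword instead of positionally. A minimal sketch of the before/after shape of the call (the pathauditor signature here is inferred from this diff, not from the full pathutil source):

    # before: the callback is bound to the second positional parameter
    auditor = pathutil.pathauditor(root, self._checknested)

    # after: the same callback, named explicitly at the call site, which
    # keeps the call correct if parameters are later added or reordered
    auditor = pathutil.pathauditor(root, callback=self._checknested)

The full context of both call sites appears in the hunk below.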
@@ -1,2346 +1,2347 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self._ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    @util.propertycache
    def ui(self):
        return self._ui

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def iterbatch(self):
        return peer.localiterbatcher(self)

    # End of peer interface.

class locallegacypeer(repository.legacypeer, localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, basenodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

class localrepository(object):

    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
        REVLOGV2_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
    }

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # These auditors are not used by the vfs; as of writing this comment,
        # the only user is basectx.match
-        self.auditor = pathutil.pathauditor(self.root, self._checknested)
-        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
-                                                realfs=False, cached=True)
+        self.auditor = pathutil.pathauditor(
+            self.root, callback=self._checknested)
+        self.nofsauditor = pathutil.pathauditor(
+            self.root, callback=self._checknested, realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                        self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extension to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # Cache of types representing filtered repos.
        self._filteredrepotypes = weakref.WeakKeyDictionary()

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # Python <3.4 easily leaks types via __mro__. See
        # https://bugs.python.org/issue17950. We cache dynamically
        # created types so this method doesn't leak on every
        # invocation.

        key = self.unfiltered().__class__
        if key not in self._filteredrepotypes:
            # Build a new type with the repoview mixin and the base
            # class of this repo. Give it a name containing the
            # filter name to aid debugging.
            bases = (repoview.repoview, key)
            cls = type(r'%sfilteredrepo' % name, bases, {})
            self._filteredrepotypes[key] = cls

        return self._filteredrepotypes[key](self, name)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phaserevs and _phasesets depend on changelog. What we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self,
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

913 def tagtype(self, tagname):
914 def tagtype(self, tagname):
914 '''
915 '''
915 return the type of the given tag. result can be:
916 return the type of the given tag. result can be:
916
917
917 'local' : a local tag
918 'local' : a local tag
918 'global' : a global tag
919 'global' : a global tag
919 None : tag does not exist
920 None : tag does not exist
920 '''
921 '''
921
922
922 return self._tagscache.tagtypes.get(tagname)
923 return self._tagscache.tagtypes.get(tagname)
923
924
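    # Illustrative sketch: branching on where a tag is defined (the tag name
    # is hypothetical).
    #
    #     kind = repo.tagtype('release-1.0')
    #     if kind == 'global':
    #         pass    # recorded in .hgtags, shared via push/pull
    #     elif kind == 'local':
    #         pass    # recorded in .hg/localtags, stays in this clone
    #     else:
    #         pass    # kind is None: no such tag
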
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

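    # Illustrative sketch of the two lookup styles branchtip() supports (the
    # branch names are hypothetical).
    #
    #     node = repo.branchtip('stable', ignoremissing=True)
    #     if node is None:
    #         pass    # branch absent; no RepoLookupError was raised
    #
    #     # whereas repo.branchtip('no-such-branch') would raise
    #     # error.RepoLookupError("unknown branch 'no-such-branch'")
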
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

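    # Illustrative sketch: known() answers membership per node, treating
    # filtered (hidden) revisions as unknown.
    #
    #     somenode = repo['tip'].node()
    #     bogus = '\xff' * 20              # almost certainly not a real node
    #     repo.known([somenode, bogus])    # -> [True, False], typically
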
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records: the dirstate cannot do it itself, as
                # it requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

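    # Illustrative sketch: _loadfilter() consumes hgrc filter sections such
    # as [encode] and [decode]; the pattern and command below are
    # hypothetical.
    #
    #     [encode]
    #     **.txt = pipe: dos2unix    # shell filter applied by wread()
    #
    # A command whose prefix matches a name registered through
    # adddatafilter() is dispatched to that Python filter function instead
    # of the shell, and '!' disables filtering for a pattern.
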
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

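    # Illustrative note on the ``flags`` argument of wwrite(): 'l' writes
    # the data as a symlink target, 'x' marks the file executable, and ''
    # writes a plain file. For example (hypothetical call):
    #
    #     repo.wwrite('bin/run.sh', script, 'x')   # executable regular file
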
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            scmutil.registersummarycallback(self, tr, desc)
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup
        # application, but that fails to cope with cases where a transaction
        # exposes new heads without a changegroup being involved (eg: phase
        # movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #     <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        # (see the illustrative parsing sketch after this method)
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles)
        tr.changes['revs'] = set()
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

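    # Illustrative sketch (hypothetical helper, not part of this module):
    # parsing the line-based ``.hg/changes/tags.changes`` format documented
    # in transaction() above.
    #
    #     def parsetagchanges(data):
    #         changes = []
    #         for line in data.splitlines():
    #             # '<action> <hex-node> <tag-name>'; tag names may contain
    #             # spaces, so split at most twice
    #             action, hexnode, name = line.split(' ', 2)
    #             assert action in ('-R', '+A', '-M', '+M')
    #             changes.append((action, hexnode, name))
    #         return changes
    #
    # A txnclose hook would consult HG_TAG_MOVED first and only then read
    # the file, since it may be left over from an earlier transaction.
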
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored
            # backup
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction close, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

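    # Illustrative note: the retry path above honors the ui.timeout setting
    # (600 seconds by default), e.g. in an hgrc:
    #
    #     [ui]
    #     timeout = 30    # give up on a contended lock after 30 seconds
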
    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

1628 def lock(self, wait=True):
1629 def lock(self, wait=True):
1629 '''Lock the repository store (.hg/store) and return a weak reference
1630 '''Lock the repository store (.hg/store) and return a weak reference
1630 to the lock. Use this before modifying the store (e.g. committing or
1631 to the lock. Use this before modifying the store (e.g. committing or
1631 stripping). If you are opening a transaction, get a lock as well.)
1632 stripping). If you are opening a transaction, get a lock as well.)
1632
1633
1633 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1634 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1634 'wlock' first to avoid a dead-lock hazard.'''
1635 'wlock' first to avoid a dead-lock hazard.'''
1635 l = self._currentlock(self._lockref)
1636 l = self._currentlock(self._lockref)
1636 if l is not None:
1637 if l is not None:
1637 l.lock()
1638 l.lock()
1638 return l
1639 return l
1639
1640
1640 l = self._lock(self.svfs, "lock", wait, None,
1641 l = self._lock(self.svfs, "lock", wait, None,
1641 self.invalidate, _('repository %s') % self.origroot)
1642 self.invalidate, _('repository %s') % self.origroot)
1642 self._lockref = weakref.ref(l)
1643 self._lockref = weakref.ref(l)
1643 return l
1644 return l
1644
1645
1645 def _wlockchecktransaction(self):
1646 def _wlockchecktransaction(self):
1646 if self.currenttransaction() is not None:
1647 if self.currenttransaction() is not None:
1647 raise error.LockInheritanceContractViolation(
1648 raise error.LockInheritanceContractViolation(
1648 'wlock cannot be inherited in the middle of a transaction')
1649 'wlock cannot be inherited in the middle of a transaction')
1649
1650
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

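    # Illustrative sketch (not part of the original module): a caller that
    # needs both locks should take 'wlock' before 'lock' and release them in
    # reverse order, mirroring what commit() below does:
    #
    #     wlock = lock = None
    #     try:
    #         wlock = repo.wlock()
    #         lock = repo.lock()
    #         ...  # mutate the store and the working directory here
    #     finally:
    #         lockmod.release(lock, wlock)
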
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

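    # Illustrative sketch: for a commit that renamed 'foo' to 'bar', the
    # metadata recorded with bar's new filelog revision would look roughly
    # like
    #
    #     meta = {'copy': 'foo', 'copyrev': hex(crev)}  # crev: foo's filenode
    #
    # with fparent1 forced to nullid so readers know to follow the copy data.
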
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

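    # Illustrative sketch (hypothetical caller, assuming hg ~4.4 APIs): a
    # minimal programmatic commit restricted to one file:
    #
    #     m = matchmod.match(repo.root, '', ['path:foo.txt'])
    #     node = repo.commit(text='update foo', user='ada <ada@example.org>',
    #                        match=m)
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
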
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

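    # Illustrative sketch: commitctx() is also the entry point for in-memory
    # commits built with context.memctx (signatures abbreviated; assumes
    # hg ~4.4 APIs):
    #
    #     def getfilectx(repo, memctx, path):
    #         return context.memfilectx(repo, path, 'new contents\n')
    #     mctx = context.memctx(repo, (p1node, p2node), 'message',
    #                           ['foo.txt'], getfilectx, user='ada')
    #     node = repo.commitctx(mctx)
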
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

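    # Illustrative sketch: comparing the working directory to its first
    # parent (the defaults), including ignored and unknown files:
    #
    #     st = repo.status(ignored=True, unknown=True)
    #     repo.ui.write('%d modified, %d unknown\n'
    #                   % (len(st.modified), len(st.unknown)))
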
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

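    # Illustrative sketch: an extension registering a post-dirstate-status
    # callback (the callback name is hypothetical):
    #
    #     def fixup(wctx, status):
    #         wctx.repo().ui.note('%d files modified\n' % len(status.modified))
    #     repo.addpostdsstatus(fixup)
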
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

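    # Illustrative sketch: listing the open heads of the 'default' branch,
    # newest first:
    #
    #     for node in repo.branchheads('default'):
    #         repo.ui.write('%s\n' % short(node))
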
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

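    # Illustrative note: for each (top, bottom) pair, between() walks the
    # first-parent chain from top towards bottom and samples the nodes at
    # exponentially growing distances 1, 2, 4, 8, ... from top -- the
    # spacing the legacy wire-protocol discovery expects.
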
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

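    # Illustrative sketch: moving a bookmark through the pushkey mechanism
    # (values are hex node strings; '' denotes a missing old value):
    #
    #     if repo.pushkey('bookmarks', 'feature', '', newnodehex):
    #         repo.ui.status('bookmark pushed\n')
    #     repo.listkeys('bookmarks')  # {'feature': newnodehex, ...}
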
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

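# Illustrative note: aftertrans(renames) is handed to the transaction (in
# localrepository.transaction, earlier in this module) as its 'after'
# callback, so 'journal.*' files become 'undo.*' files once the transaction
# closes; undoname() maps one name to the other, e.g.
#
#     undoname('.hg/store/journal')  ->  '.hg/store/undo'
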
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2'):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
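
# Illustrative sketch: an extension adding its own requirement to newly
# created repositories by wrapping this function (names hypothetical):
#
#     from mercurial import extensions, localrepo
#
#     def _wrapreqs(orig, repo):
#         reqs = orig(repo)
#         reqs.add('exp-myfeature')
#         return reqs
#
#     def extsetup(ui):
#         extensions.wrapfunction(localrepo, 'newreporequirements', _wrapreqs)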