localrepo: update comments around path auditors
Augie Fackler
r35119:ff80efc8 default
@@ -1,2347 +1,2349 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
78 """All filecache usage on repo are done for logic that should be unfiltered
78 """All filecache usage on repo are done for logic that should be unfiltered
79 """
79 """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True
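
The three helpers above all build on scmutil.filecache, which recomputes a
property only when the stat data of its backing file changes. The following
standalone sketch illustrates that invalidation idea only; it is not
Mercurial's implementation, and the names are made up for the example:

    import os

    class filecachedvalue(object):
        """Recompute a value whenever the backing file's stat data changes."""
        def __init__(self, path, compute):
            self.path = path
            self.compute = compute  # callable that builds the cached object
            self.key = self.obj = None

        def get(self):
            try:
                st = os.stat(self.path)
                key = (st.st_mtime, st.st_size)
            except OSError:
                key = None  # missing file is itself a cache key
            if self.obj is None or key != self.key:
                self.obj = self.compute()  # backing file changed: rebuild
                self.key = key
            return self.obj

repofilecache and storecache then differ only in what join() resolves the
backing path against: the .hg vfs versus the store vfs.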

class unfilteredpropertycache(util.propertycache):
120 """propertycache that apply to unfiltered repo only"""
120 """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
129 """propertycache that must take filtering in account"""
129 """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
140 """decorate method that always need to be run on unfiltered version"""
140 """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self._ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    @util.propertycache
    def ui(self):
        return self._ui

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))
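
The salvaged-output dance above is a general pattern: attach any partial
output to the exception, let the caller surface it, then re-raise. A minimal
sketch of the pattern outside bundle2 (hypothetical names, not the Mercurial
API):

    class SalvageError(Exception):
        def __init__(self, msg, salvaged=()):
            super(SalvageError, self).__init__(msg)
            self.salvaged = salvaged  # output recovered before the failure

    def runstep(step):
        try:
            return step()
        except SalvageError as exc:
            for line in exc.salvaged:
                print(line)  # surface partial output before failing
            raise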

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def iterbatch(self):
        return peer.localiterbatcher(self)

    # End of peer interface.

class locallegacypeer(repository.legacypeer, localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, basenodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

class localrepository(object):

    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
        REVLOGV2_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
    }

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
-        # These auditor are not used by the vfs,
-        # only used when writing this comment: basectx.match
+        # This is only used by context.workingctx.match in order to
+        # detect files in subrepos.
         self.auditor = pathutil.pathauditor(
             self.root, callback=self._checknested)
+        # This is only used by context.basectx.match in order to detect
+        # files in subrepos.
         self.nofsauditor = pathutil.pathauditor(
             self.root, callback=self._checknested, realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extensions to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # Cache of types representing filtered repos.
        self._filteredrepotypes = weakref.WeakKeyDictionary()

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

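On the two path auditors constructed near the top of __init__ (the subject of
this change): both validate relative paths against the repository root, with
the callback deciding whether a path that crosses a nested .hg belongs to a
legitimate subrepository. A rough standalone sketch of the string-level checks
only; the real pathutil.pathauditor additionally handles symlinks, case
folding and Windows specifics:

    import os
    import posixpath

    def auditpath(root, relpath, checknested=None):
        """Reject paths escaping ``root`` or reaching into a nested .hg."""
        parts = relpath.replace(os.sep, '/').split('/')
        if posixpath.isabs(relpath) or '..' in parts:
            raise ValueError('path escapes repository: %r' % relpath)
        for i, part in enumerate(parts[:-1]):
            if part.lower() == '.hg':
                # a nested .hg normally means a subrepo; let the callback
                # (standing in for _checknested here) approve it
                prefix = '/'.join(parts[:i])
                if not (checknested and checknested(
                        os.path.join(root, prefix))):
                    raise ValueError('path inside nested repository: %r'
                                     % relpath)
        return relpath
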
    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs
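
checkvfs is a ward in the decorator sense: it preserves the wrapped function's
behaviour and layers a policy check on top, holding only a weakref so the
wrapper never keeps the repo alive. Reduced to its shape (illustrative only,
not the Mercurial API):

    import functools

    def makeward(origfunc, islocked, warn):
        """Wrap ``origfunc`` so writes without a held lock get reported."""
        @functools.wraps(origfunc)
        def ward(path, mode=None):
            ret = origfunc(path, mode=mode)  # original behaviour first
            if mode not in (None, 'r', 'rb') and not islocked():
                warn('write with no lock: "%s"' % path)
            return ret
        return ward

_getsvfsward below follows the same shape; it differs only in which lock it
consults and in having no exempt prefixes.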

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # Python <3.4 easily leaks types via __mro__. See
        # https://bugs.python.org/issue17950. We cache dynamically
        # created types so this method doesn't leak on every
        # invocation.

        key = self.unfiltered().__class__
        if key not in self._filteredrepotypes:
            # Build a new type with the repoview mixin and the base
            # class of this repo. Give it a name containing the
            # filter name to aid debugging.
            bases = (repoview.repoview, key)
            cls = type(r'%sfilteredrepo' % name, bases, {})
            self._filteredrepotypes[key] = cls

        return self._filteredrepotypes[key](self, name)
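
The memoization in filtered() exists because type() builds a fresh class
object per call and, per the bug referenced above, old Pythons may never
collect it. The idiom in isolation (a sketch; the real code keys a
WeakKeyDictionary on the unfiltered class):

    _typecache = {}

    def viewclass(name, mixin, base):
        """Build, once per (name, mixin, base), a class combining both."""
        key = (name, mixin, base)
        if key not in _typecache:
            _typecache[key] = type(r'%sfilteredrepo' % name,
                                   (mixin, base), {})
        return _typecache[key]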

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
766 """
768 """
767 try:
769 try:
768 self[changeid]
770 self[changeid]
769 return True
771 return True
770 except error.RepoLookupError:
772 except error.RepoLookupError:
771 return False
773 return False
772
774
773 def __nonzero__(self):
775 def __nonzero__(self):
774 return True
776 return True
775
777
776 __bool__ = __nonzero__
778 __bool__ = __nonzero__
777
779
778 def __len__(self):
780 def __len__(self):
779 return len(self.changelog)
781 return len(self.changelog)
780
782
781 def __iter__(self):
783 def __iter__(self):
782 return iter(self.changelog)
784 return iter(self.changelog)
783
785
784 def revs(self, expr, *args):
786 def revs(self, expr, *args):
785 '''Find revisions matching a revset.
787 '''Find revisions matching a revset.
786
788
787 The revset is specified as a string ``expr`` that may contain
789 The revset is specified as a string ``expr`` that may contain
788 %-formatting to escape certain types. See ``revsetlang.formatspec``.
790 %-formatting to escape certain types. See ``revsetlang.formatspec``.
789
791
790 Revset aliases from the configuration are not expanded. To expand
792 Revset aliases from the configuration are not expanded. To expand
791 user aliases, consider calling ``scmutil.revrange()`` or
793 user aliases, consider calling ``scmutil.revrange()`` or
792 ``repo.anyrevs([expr], user=True)``.
794 ``repo.anyrevs([expr], user=True)``.
793
795
794 Returns a revset.abstractsmartset, which is a list-like interface
796 Returns a revset.abstractsmartset, which is a list-like interface
795 that contains integer revisions.
797 that contains integer revisions.
796 '''
798 '''
797 expr = revsetlang.formatspec(expr, *args)
799 expr = revsetlang.formatspec(expr, *args)
798 m = revset.match(None, expr)
800 m = revset.match(None, expr)
799 return m(self)
801 return m(self)
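
As a usage sketch of the %-formatting described in the docstring (a
hypothetical helper; it assumes an open repo and the stock ancestors()/draft()
revset predicates):

    def draftancestors(repo, node):
        """Yield changectxs for draft-phase ancestors of ``node``.

        %s interpolates ``node`` per revsetlang.formatspec, so the revset
        string needs no manual quoting.
        """
        for rev in repo.revs('ancestors(%s) and draft()', node):
            yield repo[rev]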

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self,
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''
844
846
845 # This simplifies its cache management by having one decorated
847 # This simplifies its cache management by having one decorated
846 # function (this one) and the rest simply fetch things from it.
848 # function (this one) and the rest simply fetch things from it.
847 class tagscache(object):
849 class tagscache(object):
848 def __init__(self):
850 def __init__(self):
849 # These two define the set of tags for this repository. tags
851 # These two define the set of tags for this repository. tags
850 # maps tag name to node; tagtypes maps tag name to 'global' or
852 # maps tag name to node; tagtypes maps tag name to 'global' or
851 # 'local'. (Global tags are defined by .hgtags across all
853 # 'local'. (Global tags are defined by .hgtags across all
852 # heads, and local tags are defined in .hg/localtags.)
854 # heads, and local tags are defined in .hg/localtags.)
853 # They constitute the in-memory cache of tags.
855 # They constitute the in-memory cache of tags.
854 self.tags = self.tagtypes = None
856 self.tags = self.tagtypes = None
855
857
856 self.nodetagscache = self.tagslist = None
858 self.nodetagscache = self.tagslist = None
857
859
858 cache = tagscache()
860 cache = tagscache()
859 cache.tags, cache.tagtypes = self._findtags()
861 cache.tags, cache.tagtypes = self._findtags()
860
862
861 return cache
863 return cache
862
864
863 def tags(self):
865 def tags(self):
864 '''return a mapping of tag to node'''
866 '''return a mapping of tag to node'''
865 t = {}
867 t = {}
866 if self.changelog.filteredrevs:
868 if self.changelog.filteredrevs:
867 tags, tt = self._findtags()
869 tags, tt = self._findtags()
868 else:
870 else:
869 tags = self._tagscache.tags
871 tags = self._tagscache.tags
870 for k, v in tags.iteritems():
872 for k, v in tags.iteritems():
871 try:
873 try:
872 # ignore tags to unknown nodes
874 # ignore tags to unknown nodes
873 self.changelog.rev(v)
875 self.changelog.rev(v)
874 t[k] = v
876 t[k] = v
875 except (error.LookupError, ValueError):
877 except (error.LookupError, ValueError):
876 pass
878 pass
877 return t
879 return t
878
880
879 def _findtags(self):
881 def _findtags(self):
880 '''Do the hard work of finding tags. Return a pair of dicts
882 '''Do the hard work of finding tags. Return a pair of dicts
881 (tags, tagtypes) where tags maps tag name to node, and tagtypes
883 (tags, tagtypes) where tags maps tag name to node, and tagtypes
882 maps tag name to a string like \'global\' or \'local\'.
884 maps tag name to a string like \'global\' or \'local\'.
883 Subclasses or extensions are free to add their own tags, but
885 Subclasses or extensions are free to add their own tags, but
884 should be aware that the returned dicts will be retained for the
886 should be aware that the returned dicts will be retained for the
885 duration of the localrepo object.'''
887 duration of the localrepo object.'''
886
888
887 # XXX what tagtype should subclasses/extensions use? Currently
889 # XXX what tagtype should subclasses/extensions use? Currently
888 # mq and bookmarks add tags, but do not set the tagtype at all.
890 # mq and bookmarks add tags, but do not set the tagtype at all.
889 # Should each extension invent its own tag type? Should there
891 # Should each extension invent its own tag type? Should there
890 # be one tagtype for all such "virtual" tags? Or is the status
892 # be one tagtype for all such "virtual" tags? Or is the status
891 # quo fine?
893 # quo fine?
892
894
893
895
894 # map tag name to (node, hist)
896 # map tag name to (node, hist)
895 alltags = tagsmod.findglobaltags(self.ui, self)
897 alltags = tagsmod.findglobaltags(self.ui, self)
896 # map tag name to tag type
898 # map tag name to tag type
897 tagtypes = dict((tag, 'global') for tag in alltags)
899 tagtypes = dict((tag, 'global') for tag in alltags)
898
900
899 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
901 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
900
902
901 # Build the return dicts. Have to re-encode tag names because
903 # Build the return dicts. Have to re-encode tag names because
902 # the tags module always uses UTF-8 (in order not to lose info
904 # the tags module always uses UTF-8 (in order not to lose info
903 # writing to the cache), but the rest of Mercurial wants them in
905 # writing to the cache), but the rest of Mercurial wants them in
904 # local encoding.
906 # local encoding.
905 tags = {}
907 tags = {}
906 for (name, (node, hist)) in alltags.iteritems():
908 for (name, (node, hist)) in alltags.iteritems():
907 if node != nullid:
909 if node != nullid:
908 tags[encoding.tolocal(name)] = node
910 tags[encoding.tolocal(name)] = node
909 tags['tip'] = self.changelog.tip()
911 tags['tip'] = self.changelog.tip()
910 tagtypes = dict([(encoding.tolocal(name), value)
912 tagtypes = dict([(encoding.tolocal(name), value)
911 for (name, value) in tagtypes.iteritems()])
913 for (name, value) in tagtypes.iteritems()])
912 return (tags, tagtypes)
914 return (tags, tagtypes)
913
915
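    # Hypothetical sketch (not in the original source) of how an extension
    # might contribute its own tags by extending _findtags; the class,
    # attribute, and tag type names are made up:
    #
    #   class myrepo(localrepository):
    #       def _findtags(self):
    #           tags, tagtypes = super(myrepo, self)._findtags()
    #           tags['snapshot'] = self._snapshotnode
    #           tagtypes['snapshot'] = 'myext'
    #           return tags, tagtypes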
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

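    # Illustrative usage of tagtype() (the tag names are hypothetical):
    #
    #   repo.tagtype('1.0')      # -> 'global' for a tag from .hgtags
    #   repo.tagtype('wip')      # -> 'local' for a tag from .hg/localtags
    #   repo.tagtype('missing')  # -> None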
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).
        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

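    # The flag read above comes from the standard [phases] section of hgrc;
    # a minimal sketch of a non-publishing server configuration:
    #
    #   [phases]
    #   publish = False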
    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

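    # The 'encode' and 'decode' filters loaded above come from hgrc; a
    # minimal sketch, assuming 'dos2unix' and 'unix2dos' commands exist on
    # the system (commands run as pipes by default, and a name registered
    # via adddatafilter may be used instead of a shell command):
    #
    #   [encode]
    #   **.txt = dos2unix
    #
    #   [decode]
    #   **.txt = unix2dos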
    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            scmutil.registersummarycallback(self, tr, desc)
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (e.g. phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
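        # Illustrative sketch (not part of the original source): a python
        # txnclose hook could consume that file roughly as follows; the
        # hook function is hypothetical:
        #
        #   def tagmovehook(ui, repo, **kwargs):
        #       if kwargs.get('tag_moved') != '1':
        #           return
        #       with repo.vfs('changes/tags.changes') as fp:
        #           for line in fp:
        #               action, hexnode, tag = line.rstrip('\n').split(' ', 2)
        #               ui.status('%s %s %s\n' % (action, tag, hexnode))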
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here. As we do it only once,
                # building sets would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist
                        # when the transaction is closed (for txnclose
                        # hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at transaction
                # close if tr.addfilegenerator (via dirstate.write or
                # so) wasn't invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles)
        tr.changes['revs'] = set()
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

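    # For illustration (hypothetical values): after _writejournal runs for a
    # repository with 42 changesets and a 'commit' transaction, .hg contains
    # a two-line journal.desc:
    #
    #   42
    #   commit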
    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

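    # Illustrative sketch (hypothetical callback): run something only after
    # every lock has been released, e.g. from within an extension:
    #
    #   def notify():
    #       repo.ui.status('all locks released\n')
    #   repo._afterlock(notify)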
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

1687 def _currentlock(self, lockref):
1689 def _currentlock(self, lockref):
1688 """Returns the lock if it's held, or None if it's not."""
1690 """Returns the lock if it's held, or None if it's not."""
1689 if lockref is None:
1691 if lockref is None:
1690 return None
1692 return None
1691 l = lockref()
1693 l = lockref()
1692 if l is None or not l.held:
1694 if l is None or not l.held:
1693 return None
1695 return None
1694 return l
1696 return l
1695
1697
1696 def currentwlock(self):
1698 def currentwlock(self):
1697 """Returns the wlock if it's held, or None if it's not."""
1699 """Returns the wlock if it's held, or None if it's not."""
1698 return self._currentlock(self._wlockref)
1700 return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (e.g. issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
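                # The copy source and revision recorded here become filelog
                # metadata ("copy"/"copyrev" keys), stored in a "\1\n"-framed
                # header at the start of the filelog revision text.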
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
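        # Note: flog.cmp(node, text) returns True when 'text' differs from
        # the stored revision (it knows to skip any copy-metadata header),
        # so a new filelog revision is written below only when the content,
        # parents, or copy metadata actually changed.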
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
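                        # for/else: reached only when no matched file lies
                        # under the explicitly named directory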
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
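        # Illustrative sketch (assumption, not part of the original source):
        # a minimal programmatic use is
        #
        #     node = repo.commit(text='message', user='user@example.com')
        #
        # which commits all working-directory changes; commit() acquires
        # wlock and lock itself, so callers need not hold them.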
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """
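        # Files are committed first, then the manifest, then the changelog
        # entry, all within a single transaction; linkrev is set to len(self),
        # the revision number the new changelog entry will receive, so the
        # filelog and manifest revisions link back to it.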

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets' phases; if a parent has a higher phase, the
                # resulting phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """
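        # Illustrative sketch (assumption, not part of the original source):
        # an extension would typically register a fixup like
        #
        #     def fixup(wctx, status):
        #         ...  # e.g. repair cached state using wctx.repo().dirstate
        #     repo.addpostdsstatus(fixup)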

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
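                # follow first parents from t until a merge (two real
                # parents) or a root is reached, then record the segment
                # as (t, n, p[0], p[1])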
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

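            # sample nodes while walking first parents from top towards
            # bottom: positions 1, 2, 4, 8, ... are kept, giving the caller
            # a logarithmic-size summary of the range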
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote, and
        outgoing methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
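            # deferred via _afterlock below so the 'pushkey' hook only fires
            # once the current lock is released and the change is visible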
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2'):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
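
# Illustrative note (assumption, not part of the original source): with stock
# defaults (usestore, usefncache, and dotencode enabled, zlib compression,
# generaldelta on), newreporequirements() returns
# {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}.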