repoview: do not include filter name in name of proxy class...
Yuya Nishihara
r35247:9ce4e01f default
@@ -1,2290 +1,2289 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 )
23 )
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 branchmap,
26 branchmap,
27 bundle2,
27 bundle2,
28 changegroup,
28 changegroup,
29 changelog,
29 changelog,
30 color,
30 color,
31 context,
31 context,
32 dirstate,
32 dirstate,
33 dirstateguard,
33 dirstateguard,
34 discovery,
34 discovery,
35 encoding,
35 encoding,
36 error,
36 error,
37 exchange,
37 exchange,
38 extensions,
38 extensions,
39 filelog,
39 filelog,
40 hook,
40 hook,
41 lock as lockmod,
41 lock as lockmod,
42 manifest,
42 manifest,
43 match as matchmod,
43 match as matchmod,
44 merge as mergemod,
44 merge as mergemod,
45 mergeutil,
45 mergeutil,
46 namespaces,
46 namespaces,
47 obsolete,
47 obsolete,
48 pathutil,
48 pathutil,
49 peer,
49 peer,
50 phases,
50 phases,
51 pushkey,
51 pushkey,
52 pycompat,
52 pycompat,
53 repository,
53 repository,
54 repoview,
54 repoview,
55 revset,
55 revset,
56 revsetlang,
56 revsetlang,
57 scmutil,
57 scmutil,
58 sparse,
58 sparse,
59 store,
59 store,
60 subrepo,
60 subrepo,
61 tags as tagsmod,
61 tags as tagsmod,
62 transaction,
62 transaction,
63 txnutil,
63 txnutil,
64 util,
64 util,
65 vfs as vfsmod,
65 vfs as vfsmod,
66 )
66 )
67
67
68 release = lockmod.release
68 release = lockmod.release
69 urlerr = util.urlerr
69 urlerr = util.urlerr
70 urlreq = util.urlreq
70 urlreq = util.urlreq
71
71
72 # set of (path, vfs-location) tuples. vfs-location is:
72 # set of (path, vfs-location) tuples. vfs-location is:
73 # - 'plain' for vfs relative paths
73 # - 'plain' for vfs relative paths
74 # - '' for svfs relative paths
74 # - '' for svfs relative paths
75 _cachedfiles = set()
75 _cachedfiles = set()
76
76
77 class _basefilecache(scmutil.filecache):
77 class _basefilecache(scmutil.filecache):
78 """All filecache usage on repo are done for logic that should be unfiltered
78 """All filecache usage on repo are done for logic that should be unfiltered
79 """
79 """
80 def __get__(self, repo, type=None):
80 def __get__(self, repo, type=None):
81 if repo is None:
81 if repo is None:
82 return self
82 return self
83 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
83 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
84 def __set__(self, repo, value):
84 def __set__(self, repo, value):
85 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
85 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
86 def __delete__(self, repo):
86 def __delete__(self, repo):
87 return super(_basefilecache, self).__delete__(repo.unfiltered())
87 return super(_basefilecache, self).__delete__(repo.unfiltered())
88
88
89 class repofilecache(_basefilecache):
89 class repofilecache(_basefilecache):
90 """filecache for files in .hg but outside of .hg/store"""
90 """filecache for files in .hg but outside of .hg/store"""
91 def __init__(self, *paths):
91 def __init__(self, *paths):
92 super(repofilecache, self).__init__(*paths)
92 super(repofilecache, self).__init__(*paths)
93 for path in paths:
93 for path in paths:
94 _cachedfiles.add((path, 'plain'))
94 _cachedfiles.add((path, 'plain'))
95
95
96 def join(self, obj, fname):
96 def join(self, obj, fname):
97 return obj.vfs.join(fname)
97 return obj.vfs.join(fname)
98
98
99 class storecache(_basefilecache):
99 class storecache(_basefilecache):
100 """filecache for files in the store"""
100 """filecache for files in the store"""
101 def __init__(self, *paths):
101 def __init__(self, *paths):
102 super(storecache, self).__init__(*paths)
102 super(storecache, self).__init__(*paths)
103 for path in paths:
103 for path in paths:
104 _cachedfiles.add((path, ''))
104 _cachedfiles.add((path, ''))
105
105
106 def join(self, obj, fname):
106 def join(self, obj, fname):
107 return obj.sjoin(fname)
107 return obj.sjoin(fname)
108
108
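For orientation, a hypothetical sketch of how these two cache decorators are applied; the property names here are invented, but real declarations of exactly this shape (`@repofilecache('bookmarks', 'bookmarks.current')`, `@storecache('00changelog.i')`) appear further down in this file:

```python
class somerepo(localrepository):       # localrepository is defined below
    @repofilecache('bookmarks')        # watched through repo.vfs (.hg/)
    def _examplebookmarks(self):
        return bookmarks.bmstore(self)

    @storecache('00changelog.i')       # watched through repo.svfs (.hg/store/)
    def _examplechangelog(self):
        return changelog.changelog(self.svfs)
```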
109 def isfilecached(repo, name):
109 def isfilecached(repo, name):
110 """check if a repo has already cached "name" filecache-ed property
110 """check if a repo has already cached "name" filecache-ed property
111
111
112 This returns a (cachedobj-or-None, iscached) tuple.
112 This returns a (cachedobj-or-None, iscached) tuple.
113 """
113 """
114 cacheentry = repo.unfiltered()._filecache.get(name, None)
114 cacheentry = repo.unfiltered()._filecache.get(name, None)
115 if not cacheentry:
115 if not cacheentry:
116 return None, False
116 return None, False
117 return cacheentry.obj, True
117 return cacheentry.obj, True
118
118
119 class unfilteredpropertycache(util.propertycache):
119 class unfilteredpropertycache(util.propertycache):
120 """propertycache that apply to unfiltered repo only"""
120 """propertycache that apply to unfiltered repo only"""
121
121
122 def __get__(self, repo, type=None):
122 def __get__(self, repo, type=None):
123 unfi = repo.unfiltered()
123 unfi = repo.unfiltered()
124 if unfi is repo:
124 if unfi is repo:
125 return super(unfilteredpropertycache, self).__get__(unfi)
125 return super(unfilteredpropertycache, self).__get__(unfi)
126 return getattr(unfi, self.name)
126 return getattr(unfi, self.name)
127
127
128 class filteredpropertycache(util.propertycache):
128 class filteredpropertycache(util.propertycache):
129 """propertycache that must take filtering in account"""
129 """propertycache that must take filtering in account"""
130
130
131 def cachevalue(self, obj, value):
131 def cachevalue(self, obj, value):
132 object.__setattr__(obj, self.name, value)
132 object.__setattr__(obj, self.name, value)
133
133
134
134
135 def hasunfilteredcache(repo, name):
135 def hasunfilteredcache(repo, name):
136 """check if a repo has an unfilteredpropertycache value for <name>"""
136 """check if a repo has an unfilteredpropertycache value for <name>"""
137 return name in vars(repo.unfiltered())
137 return name in vars(repo.unfiltered())
138
138
139 def unfilteredmethod(orig):
139 def unfilteredmethod(orig):
140 """decorate method that always need to be run on unfiltered version"""
140 """decorate method that always need to be run on unfiltered version"""
141 def wrapper(repo, *args, **kwargs):
141 def wrapper(repo, *args, **kwargs):
142 return orig(repo.unfiltered(), *args, **kwargs)
142 return orig(repo.unfiltered(), *args, **kwargs)
143 return wrapper
143 return wrapper
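A minimal sketch of the decorator in use, assuming a repo class from this module; the method name and body are invented for illustration:

```python
class somerepo(localrepository):
    @unfilteredmethod
    def _droprevcaches(self):          # hypothetical method
        # 'self' is repo.unfiltered() here, even when the call was made
        # on a filtered view, so the shared cache is cleared exactly once.
        self.filteredrevcache.clear()
```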
144
144
145 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
145 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
146 'unbundle'}
146 'unbundle'}
147 legacycaps = moderncaps.union({'changegroupsubset'})
147 legacycaps = moderncaps.union({'changegroupsubset'})
148
148
149 class localpeer(repository.peer):
149 class localpeer(repository.peer):
150 '''peer for a local repo; reflects only the most recent API'''
150 '''peer for a local repo; reflects only the most recent API'''
151
151
152 def __init__(self, repo, caps=None):
152 def __init__(self, repo, caps=None):
153 super(localpeer, self).__init__()
153 super(localpeer, self).__init__()
154
154
155 if caps is None:
155 if caps is None:
156 caps = moderncaps.copy()
156 caps = moderncaps.copy()
157 self._repo = repo.filtered('served')
157 self._repo = repo.filtered('served')
158 self._ui = repo.ui
158 self._ui = repo.ui
159 self._caps = repo._restrictcapabilities(caps)
159 self._caps = repo._restrictcapabilities(caps)
160
160
161 # Begin of _basepeer interface.
161 # Begin of _basepeer interface.
162
162
163 @util.propertycache
163 @util.propertycache
164 def ui(self):
164 def ui(self):
165 return self._ui
165 return self._ui
166
166
167 def url(self):
167 def url(self):
168 return self._repo.url()
168 return self._repo.url()
169
169
170 def local(self):
170 def local(self):
171 return self._repo
171 return self._repo
172
172
173 def peer(self):
173 def peer(self):
174 return self
174 return self
175
175
176 def canpush(self):
176 def canpush(self):
177 return True
177 return True
178
178
179 def close(self):
179 def close(self):
180 self._repo.close()
180 self._repo.close()
181
181
182 # End of _basepeer interface.
182 # End of _basepeer interface.
183
183
184 # Begin of _basewirecommands interface.
184 # Begin of _basewirecommands interface.
185
185
186 def branchmap(self):
186 def branchmap(self):
187 return self._repo.branchmap()
187 return self._repo.branchmap()
188
188
189 def capabilities(self):
189 def capabilities(self):
190 return self._caps
190 return self._caps
191
191
192 def debugwireargs(self, one, two, three=None, four=None, five=None):
192 def debugwireargs(self, one, two, three=None, four=None, five=None):
193 """Used to test argument passing over the wire"""
193 """Used to test argument passing over the wire"""
194 return "%s %s %s %s %s" % (one, two, three, four, five)
194 return "%s %s %s %s %s" % (one, two, three, four, five)
195
195
196 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
196 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
197 **kwargs):
197 **kwargs):
198 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
198 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
199 common=common, bundlecaps=bundlecaps,
199 common=common, bundlecaps=bundlecaps,
200 **kwargs)
200 **kwargs)
201 cb = util.chunkbuffer(chunks)
201 cb = util.chunkbuffer(chunks)
202
202
203 if exchange.bundle2requested(bundlecaps):
203 if exchange.bundle2requested(bundlecaps):
204 # When requesting a bundle2, getbundle returns a stream to make the
204 # When requesting a bundle2, getbundle returns a stream to make the
205 # wire level function happier. We need to build a proper object
205 # wire level function happier. We need to build a proper object
206 # from it in local peer.
206 # from it in local peer.
207 return bundle2.getunbundler(self.ui, cb)
207 return bundle2.getunbundler(self.ui, cb)
208 else:
208 else:
209 return changegroup.getunbundler('01', cb, None)
209 return changegroup.getunbundler('01', cb, None)
210
210
211 def heads(self):
211 def heads(self):
212 return self._repo.heads()
212 return self._repo.heads()
213
213
214 def known(self, nodes):
214 def known(self, nodes):
215 return self._repo.known(nodes)
215 return self._repo.known(nodes)
216
216
217 def listkeys(self, namespace):
217 def listkeys(self, namespace):
218 return self._repo.listkeys(namespace)
218 return self._repo.listkeys(namespace)
219
219
220 def lookup(self, key):
220 def lookup(self, key):
221 return self._repo.lookup(key)
221 return self._repo.lookup(key)
222
222
223 def pushkey(self, namespace, key, old, new):
223 def pushkey(self, namespace, key, old, new):
224 return self._repo.pushkey(namespace, key, old, new)
224 return self._repo.pushkey(namespace, key, old, new)
225
225
226 def stream_out(self):
226 def stream_out(self):
227 raise error.Abort(_('cannot perform stream clone against local '
227 raise error.Abort(_('cannot perform stream clone against local '
228 'peer'))
228 'peer'))
229
229
230 def unbundle(self, cg, heads, url):
230 def unbundle(self, cg, heads, url):
231 """apply a bundle on a repo
231 """apply a bundle on a repo
232
232
233 This function handles the repo locking itself."""
233 This function handles the repo locking itself."""
234 try:
234 try:
235 try:
235 try:
236 cg = exchange.readbundle(self.ui, cg, None)
236 cg = exchange.readbundle(self.ui, cg, None)
237 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
237 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
238 if util.safehasattr(ret, 'getchunks'):
238 if util.safehasattr(ret, 'getchunks'):
239 # This is a bundle20 object, turn it into an unbundler.
239 # This is a bundle20 object, turn it into an unbundler.
240 # This little dance should be dropped eventually when the
240 # This little dance should be dropped eventually when the
241 # API is finally improved.
241 # API is finally improved.
242 stream = util.chunkbuffer(ret.getchunks())
242 stream = util.chunkbuffer(ret.getchunks())
243 ret = bundle2.getunbundler(self.ui, stream)
243 ret = bundle2.getunbundler(self.ui, stream)
244 return ret
244 return ret
245 except Exception as exc:
245 except Exception as exc:
246 # If the exception contains output salvaged from a bundle2
246 # If the exception contains output salvaged from a bundle2
247 # reply, we need to make sure it is printed before continuing
247 # reply, we need to make sure it is printed before continuing
248 # to fail. So we build a bundle2 with such output and consume
248 # to fail. So we build a bundle2 with such output and consume
249 # it directly.
249 # it directly.
250 #
250 #
251 # This is not very elegant but allows a "simple" solution for
251 # This is not very elegant but allows a "simple" solution for
252 # issue4594
252 # issue4594
253 output = getattr(exc, '_bundle2salvagedoutput', ())
253 output = getattr(exc, '_bundle2salvagedoutput', ())
254 if output:
254 if output:
255 bundler = bundle2.bundle20(self._repo.ui)
255 bundler = bundle2.bundle20(self._repo.ui)
256 for out in output:
256 for out in output:
257 bundler.addpart(out)
257 bundler.addpart(out)
258 stream = util.chunkbuffer(bundler.getchunks())
258 stream = util.chunkbuffer(bundler.getchunks())
259 b = bundle2.getunbundler(self.ui, stream)
259 b = bundle2.getunbundler(self.ui, stream)
260 bundle2.processbundle(self._repo, b)
260 bundle2.processbundle(self._repo, b)
261 raise
261 raise
262 except error.PushRaced as exc:
262 except error.PushRaced as exc:
263 raise error.ResponseError(_('push failed:'), str(exc))
263 raise error.ResponseError(_('push failed:'), str(exc))
264
264
265 # End of _basewirecommands interface.
265 # End of _basewirecommands interface.
266
266
267 # Begin of peer interface.
267 # Begin of peer interface.
268
268
269 def iterbatch(self):
269 def iterbatch(self):
270 return peer.localiterbatcher(self)
270 return peer.localiterbatcher(self)
271
271
272 # End of peer interface.
272 # End of peer interface.
273
273
274 class locallegacypeer(repository.legacypeer, localpeer):
274 class locallegacypeer(repository.legacypeer, localpeer):
275 '''peer extension which implements legacy methods too; used for tests with
275 '''peer extension which implements legacy methods too; used for tests with
276 restricted capabilities'''
276 restricted capabilities'''
277
277
278 def __init__(self, repo):
278 def __init__(self, repo):
279 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
279 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
280
280
281 # Begin of baselegacywirecommands interface.
281 # Begin of baselegacywirecommands interface.
282
282
283 def between(self, pairs):
283 def between(self, pairs):
284 return self._repo.between(pairs)
284 return self._repo.between(pairs)
285
285
286 def branches(self, nodes):
286 def branches(self, nodes):
287 return self._repo.branches(nodes)
287 return self._repo.branches(nodes)
288
288
289 def changegroup(self, basenodes, source):
289 def changegroup(self, basenodes, source):
290 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
290 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
291 missingheads=self._repo.heads())
291 missingheads=self._repo.heads())
292 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
292 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
293
293
294 def changegroupsubset(self, bases, heads, source):
294 def changegroupsubset(self, bases, heads, source):
295 outgoing = discovery.outgoing(self._repo, missingroots=bases,
295 outgoing = discovery.outgoing(self._repo, missingroots=bases,
296 missingheads=heads)
296 missingheads=heads)
297 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
297 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
298
298
299 # End of baselegacywirecommands interface.
299 # End of baselegacywirecommands interface.
300
300
301 # Increment the sub-version when the revlog v2 format changes to lock out old
301 # Increment the sub-version when the revlog v2 format changes to lock out old
302 # clients.
302 # clients.
303 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
303 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
304
304
305 class localrepository(object):
305 class localrepository(object):
306
306
307 supportedformats = {
307 supportedformats = {
308 'revlogv1',
308 'revlogv1',
309 'generaldelta',
309 'generaldelta',
310 'treemanifest',
310 'treemanifest',
311 'manifestv2',
311 'manifestv2',
312 REVLOGV2_REQUIREMENT,
312 REVLOGV2_REQUIREMENT,
313 }
313 }
314 _basesupported = supportedformats | {
314 _basesupported = supportedformats | {
315 'store',
315 'store',
316 'fncache',
316 'fncache',
317 'shared',
317 'shared',
318 'relshared',
318 'relshared',
319 'dotencode',
319 'dotencode',
320 'exp-sparse',
320 'exp-sparse',
321 }
321 }
322 openerreqs = {
322 openerreqs = {
323 'revlogv1',
323 'revlogv1',
324 'generaldelta',
324 'generaldelta',
325 'treemanifest',
325 'treemanifest',
326 'manifestv2',
326 'manifestv2',
327 }
327 }
328
328
329 # a list of (ui, featureset) functions.
329 # a list of (ui, featureset) functions.
330 # only functions defined in modules of enabled extensions are invoked
330 # only functions defined in modules of enabled extensions are invoked
331 featuresetupfuncs = set()
331 featuresetupfuncs = set()
332
332
333 # list of prefixes for files which can be written without 'wlock'
333 # list of prefixes for files which can be written without 'wlock'
334 # Extensions should extend this list when needed
334 # Extensions should extend this list when needed
335 _wlockfreeprefix = {
335 _wlockfreeprefix = {
336 # We might consider requiring 'wlock' for the next
336 # We might consider requiring 'wlock' for the next
337 # two, but pretty much all the existing code assumes
337 # two, but pretty much all the existing code assumes
338 # wlock is not needed so we keep them excluded for
338 # wlock is not needed so we keep them excluded for
339 # now.
339 # now.
340 'hgrc',
340 'hgrc',
341 'requires',
341 'requires',
342 # XXX cache is a complicated business; someone
342 # XXX cache is a complicated business; someone
343 # should investigate this in depth at some point
343 # should investigate this in depth at some point
344 'cache/',
344 'cache/',
345 # XXX shouldn't dirstate be covered by the wlock?
345 # XXX shouldn't dirstate be covered by the wlock?
346 'dirstate',
346 'dirstate',
347 # XXX bisect was still a bit too messy at the time
347 # XXX bisect was still a bit too messy at the time
348 # this changeset was introduced. Someone should fix
348 # this changeset was introduced. Someone should fix
349 # the remaining bit and drop this line
349 # the remaining bit and drop this line
350 'bisect.state',
350 'bisect.state',
351 }
351 }
352
352
353 def __init__(self, baseui, path, create=False):
353 def __init__(self, baseui, path, create=False):
354 self.requirements = set()
354 self.requirements = set()
355 self.filtername = None
355 self.filtername = None
356 # wvfs: rooted at the repository root, used to access the working copy
356 # wvfs: rooted at the repository root, used to access the working copy
357 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
357 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
358 # vfs: rooted at .hg, used to access repo files outside of .hg/store
358 # vfs: rooted at .hg, used to access repo files outside of .hg/store
359 self.vfs = None
359 self.vfs = None
360 # svfs: usually rooted at .hg/store, used to access repository history
360 # svfs: usually rooted at .hg/store, used to access repository history
361 # If this is a shared repository, this vfs may point to another
361 # If this is a shared repository, this vfs may point to another
362 # repository's .hg/store directory.
362 # repository's .hg/store directory.
363 self.svfs = None
363 self.svfs = None
364 self.root = self.wvfs.base
364 self.root = self.wvfs.base
365 self.path = self.wvfs.join(".hg")
365 self.path = self.wvfs.join(".hg")
366 self.origroot = path
366 self.origroot = path
367 # This is only used by context.workingctx.match in order to
367 # This is only used by context.workingctx.match in order to
368 # detect files in subrepos.
368 # detect files in subrepos.
369 self.auditor = pathutil.pathauditor(
369 self.auditor = pathutil.pathauditor(
370 self.root, callback=self._checknested)
370 self.root, callback=self._checknested)
371 # This is only used by context.basectx.match in order to detect
371 # This is only used by context.basectx.match in order to detect
372 # files in subrepos.
372 # files in subrepos.
373 self.nofsauditor = pathutil.pathauditor(
373 self.nofsauditor = pathutil.pathauditor(
374 self.root, callback=self._checknested, realfs=False, cached=True)
374 self.root, callback=self._checknested, realfs=False, cached=True)
375 self.baseui = baseui
375 self.baseui = baseui
376 self.ui = baseui.copy()
376 self.ui = baseui.copy()
377 self.ui.copy = baseui.copy # prevent copying repo configuration
377 self.ui.copy = baseui.copy # prevent copying repo configuration
378 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
378 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
379 if (self.ui.configbool('devel', 'all-warnings') or
379 if (self.ui.configbool('devel', 'all-warnings') or
380 self.ui.configbool('devel', 'check-locks')):
380 self.ui.configbool('devel', 'check-locks')):
381 self.vfs.audit = self._getvfsward(self.vfs.audit)
381 self.vfs.audit = self._getvfsward(self.vfs.audit)
382 # A list of callbacks to shape the phase if no data were found.
382 # A list of callbacks to shape the phase if no data were found.
383 # Callbacks are in the form: func(repo, roots) --> processed root.
383 # Callbacks are in the form: func(repo, roots) --> processed root.
384 # This list is to be filled by extensions during repo setup
384 # This list is to be filled by extensions during repo setup
385 self._phasedefaults = []
385 self._phasedefaults = []
386 try:
386 try:
387 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
387 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
388 self._loadextensions()
388 self._loadextensions()
389 except IOError:
389 except IOError:
390 pass
390 pass
391
391
392 if self.featuresetupfuncs:
392 if self.featuresetupfuncs:
393 self.supported = set(self._basesupported) # use private copy
393 self.supported = set(self._basesupported) # use private copy
394 extmods = set(m.__name__ for n, m
394 extmods = set(m.__name__ for n, m
395 in extensions.extensions(self.ui))
395 in extensions.extensions(self.ui))
396 for setupfunc in self.featuresetupfuncs:
396 for setupfunc in self.featuresetupfuncs:
397 if setupfunc.__module__ in extmods:
397 if setupfunc.__module__ in extmods:
398 setupfunc(self.ui, self.supported)
398 setupfunc(self.ui, self.supported)
399 else:
399 else:
400 self.supported = self._basesupported
400 self.supported = self._basesupported
401 color.setup(self.ui)
401 color.setup(self.ui)
402
402
403 # Add compression engines.
403 # Add compression engines.
404 for name in util.compengines:
404 for name in util.compengines:
405 engine = util.compengines[name]
405 engine = util.compengines[name]
406 if engine.revlogheader():
406 if engine.revlogheader():
407 self.supported.add('exp-compression-%s' % name)
407 self.supported.add('exp-compression-%s' % name)
408
408
409 if not self.vfs.isdir():
409 if not self.vfs.isdir():
410 if create:
410 if create:
411 self.requirements = newreporequirements(self)
411 self.requirements = newreporequirements(self)
412
412
413 if not self.wvfs.exists():
413 if not self.wvfs.exists():
414 self.wvfs.makedirs()
414 self.wvfs.makedirs()
415 self.vfs.makedir(notindexed=True)
415 self.vfs.makedir(notindexed=True)
416
416
417 if 'store' in self.requirements:
417 if 'store' in self.requirements:
418 self.vfs.mkdir("store")
418 self.vfs.mkdir("store")
419
419
420 # create an invalid changelog
420 # create an invalid changelog
421 self.vfs.append(
421 self.vfs.append(
422 "00changelog.i",
422 "00changelog.i",
423 '\0\0\0\2' # represents revlogv2
423 '\0\0\0\2' # represents revlogv2
424 ' dummy changelog to prevent using the old repo layout'
424 ' dummy changelog to prevent using the old repo layout'
425 )
425 )
426 else:
426 else:
427 raise error.RepoError(_("repository %s not found") % path)
427 raise error.RepoError(_("repository %s not found") % path)
428 elif create:
428 elif create:
429 raise error.RepoError(_("repository %s already exists") % path)
429 raise error.RepoError(_("repository %s already exists") % path)
430 else:
430 else:
431 try:
431 try:
432 self.requirements = scmutil.readrequires(
432 self.requirements = scmutil.readrequires(
433 self.vfs, self.supported)
433 self.vfs, self.supported)
434 except IOError as inst:
434 except IOError as inst:
435 if inst.errno != errno.ENOENT:
435 if inst.errno != errno.ENOENT:
436 raise
436 raise
437
437
438 cachepath = self.vfs.join('cache')
438 cachepath = self.vfs.join('cache')
439 self.sharedpath = self.path
439 self.sharedpath = self.path
440 try:
440 try:
441 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
441 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
442 if 'relshared' in self.requirements:
442 if 'relshared' in self.requirements:
443 sharedpath = self.vfs.join(sharedpath)
443 sharedpath = self.vfs.join(sharedpath)
444 vfs = vfsmod.vfs(sharedpath, realpath=True)
444 vfs = vfsmod.vfs(sharedpath, realpath=True)
445 cachepath = vfs.join('cache')
445 cachepath = vfs.join('cache')
446 s = vfs.base
446 s = vfs.base
447 if not vfs.exists():
447 if not vfs.exists():
448 raise error.RepoError(
448 raise error.RepoError(
449 _('.hg/sharedpath points to nonexistent directory %s') % s)
449 _('.hg/sharedpath points to nonexistent directory %s') % s)
450 self.sharedpath = s
450 self.sharedpath = s
451 except IOError as inst:
451 except IOError as inst:
452 if inst.errno != errno.ENOENT:
452 if inst.errno != errno.ENOENT:
453 raise
453 raise
454
454
455 if 'exp-sparse' in self.requirements and not sparse.enabled:
455 if 'exp-sparse' in self.requirements and not sparse.enabled:
456 raise error.RepoError(_('repository is using sparse feature but '
456 raise error.RepoError(_('repository is using sparse feature but '
457 'sparse is not enabled; enable the '
457 'sparse is not enabled; enable the '
458 '"sparse" extensions to access'))
458 '"sparse" extensions to access'))
459
459
460 self.store = store.store(
460 self.store = store.store(
461 self.requirements, self.sharedpath,
461 self.requirements, self.sharedpath,
462 lambda base: vfsmod.vfs(base, cacheaudited=True))
462 lambda base: vfsmod.vfs(base, cacheaudited=True))
463 self.spath = self.store.path
463 self.spath = self.store.path
464 self.svfs = self.store.vfs
464 self.svfs = self.store.vfs
465 self.sjoin = self.store.join
465 self.sjoin = self.store.join
466 self.vfs.createmode = self.store.createmode
466 self.vfs.createmode = self.store.createmode
467 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
467 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
468 self.cachevfs.createmode = self.store.createmode
468 self.cachevfs.createmode = self.store.createmode
469 if (self.ui.configbool('devel', 'all-warnings') or
469 if (self.ui.configbool('devel', 'all-warnings') or
470 self.ui.configbool('devel', 'check-locks')):
470 self.ui.configbool('devel', 'check-locks')):
471 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
471 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
472 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
472 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
473 else: # standard vfs
473 else: # standard vfs
474 self.svfs.audit = self._getsvfsward(self.svfs.audit)
474 self.svfs.audit = self._getsvfsward(self.svfs.audit)
475 self._applyopenerreqs()
475 self._applyopenerreqs()
476 if create:
476 if create:
477 self._writerequirements()
477 self._writerequirements()
478
478
479 self._dirstatevalidatewarned = False
479 self._dirstatevalidatewarned = False
480
480
481 self._branchcaches = {}
481 self._branchcaches = {}
482 self._revbranchcache = None
482 self._revbranchcache = None
483 self.filterpats = {}
483 self.filterpats = {}
484 self._datafilters = {}
484 self._datafilters = {}
485 self._transref = self._lockref = self._wlockref = None
485 self._transref = self._lockref = self._wlockref = None
486
486
487 # A cache for various files under .hg/ that tracks file changes,
487 # A cache for various files under .hg/ that tracks file changes,
488 # (used by the filecache decorator)
488 # (used by the filecache decorator)
489 #
489 #
490 # Maps a property name to its util.filecacheentry
490 # Maps a property name to its util.filecacheentry
491 self._filecache = {}
491 self._filecache = {}
492
492
493 # hold sets of revision to be filtered
493 # hold sets of revision to be filtered
494 # should be cleared when something might have changed the filter value:
494 # should be cleared when something might have changed the filter value:
495 # - new changesets,
495 # - new changesets,
496 # - phase change,
496 # - phase change,
497 # - new obsolescence marker,
497 # - new obsolescence marker,
498 # - working directory parent change,
498 # - working directory parent change,
499 # - bookmark changes
499 # - bookmark changes
500 self.filteredrevcache = {}
500 self.filteredrevcache = {}
501
501
502 # post-dirstate-status hooks
502 # post-dirstate-status hooks
503 self._postdsstatus = []
503 self._postdsstatus = []
504
504
505 # Cache of types representing filtered repos.
505 # Cache of types representing filtered repos.
506 self._filteredrepotypes = weakref.WeakKeyDictionary()
506 self._filteredrepotypes = weakref.WeakKeyDictionary()
507
507
508 # generic mapping between names and nodes
508 # generic mapping between names and nodes
509 self.names = namespaces.namespaces()
509 self.names = namespaces.namespaces()
510
510
511 # Key to signature value.
511 # Key to signature value.
512 self._sparsesignaturecache = {}
512 self._sparsesignaturecache = {}
513 # Signature to cached matcher instance.
513 # Signature to cached matcher instance.
514 self._sparsematchercache = {}
514 self._sparsematchercache = {}
515
515
516 def _getvfsward(self, origfunc):
516 def _getvfsward(self, origfunc):
517 """build a ward for self.vfs"""
517 """build a ward for self.vfs"""
518 rref = weakref.ref(self)
518 rref = weakref.ref(self)
519 def checkvfs(path, mode=None):
519 def checkvfs(path, mode=None):
520 ret = origfunc(path, mode=mode)
520 ret = origfunc(path, mode=mode)
521 repo = rref()
521 repo = rref()
522 if (repo is None
522 if (repo is None
523 or not util.safehasattr(repo, '_wlockref')
523 or not util.safehasattr(repo, '_wlockref')
524 or not util.safehasattr(repo, '_lockref')):
524 or not util.safehasattr(repo, '_lockref')):
525 return
525 return
526 if mode in (None, 'r', 'rb'):
526 if mode in (None, 'r', 'rb'):
527 return
527 return
528 if path.startswith(repo.path):
528 if path.startswith(repo.path):
529 # truncate name relative to the repository (.hg)
529 # truncate name relative to the repository (.hg)
530 path = path[len(repo.path) + 1:]
530 path = path[len(repo.path) + 1:]
531 if path.startswith('cache/'):
531 if path.startswith('cache/'):
532 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
532 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
533 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
533 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
534 if path.startswith('journal.'):
534 if path.startswith('journal.'):
535 # journal is covered by 'lock'
535 # journal is covered by 'lock'
536 if repo._currentlock(repo._lockref) is None:
536 if repo._currentlock(repo._lockref) is None:
537 repo.ui.develwarn('write with no lock: "%s"' % path,
537 repo.ui.develwarn('write with no lock: "%s"' % path,
538 stacklevel=2, config='check-locks')
538 stacklevel=2, config='check-locks')
539 elif repo._currentlock(repo._wlockref) is None:
539 elif repo._currentlock(repo._wlockref) is None:
540 # rest of vfs files are covered by 'wlock'
540 # rest of vfs files are covered by 'wlock'
541 #
541 #
542 # exclude special files
542 # exclude special files
543 for prefix in self._wlockfreeprefix:
543 for prefix in self._wlockfreeprefix:
544 if path.startswith(prefix):
544 if path.startswith(prefix):
545 return
545 return
546 repo.ui.develwarn('write with no wlock: "%s"' % path,
546 repo.ui.develwarn('write with no wlock: "%s"' % path,
547 stacklevel=2, config='check-locks')
547 stacklevel=2, config='check-locks')
548 return ret
548 return ret
549 return checkvfs
549 return checkvfs
550
550
551 def _getsvfsward(self, origfunc):
551 def _getsvfsward(self, origfunc):
552 """build a ward for self.svfs"""
552 """build a ward for self.svfs"""
553 rref = weakref.ref(self)
553 rref = weakref.ref(self)
554 def checksvfs(path, mode=None):
554 def checksvfs(path, mode=None):
555 ret = origfunc(path, mode=mode)
555 ret = origfunc(path, mode=mode)
556 repo = rref()
556 repo = rref()
557 if repo is None or not util.safehasattr(repo, '_lockref'):
557 if repo is None or not util.safehasattr(repo, '_lockref'):
558 return
558 return
559 if mode in (None, 'r', 'rb'):
559 if mode in (None, 'r', 'rb'):
560 return
560 return
561 if path.startswith(repo.sharedpath):
561 if path.startswith(repo.sharedpath):
562 # truncate name relative to the repository (.hg)
562 # truncate name relative to the repository (.hg)
563 path = path[len(repo.sharedpath) + 1:]
563 path = path[len(repo.sharedpath) + 1:]
564 if repo._currentlock(repo._lockref) is None:
564 if repo._currentlock(repo._lockref) is None:
565 repo.ui.develwarn('write with no lock: "%s"' % path,
565 repo.ui.develwarn('write with no lock: "%s"' % path,
566 stacklevel=3)
566 stacklevel=3)
567 return ret
567 return ret
568 return checksvfs
568 return checksvfs
569
569
570 def close(self):
570 def close(self):
571 self._writecaches()
571 self._writecaches()
572
572
573 def _loadextensions(self):
573 def _loadextensions(self):
574 extensions.loadall(self.ui)
574 extensions.loadall(self.ui)
575
575
576 def _writecaches(self):
576 def _writecaches(self):
577 if self._revbranchcache:
577 if self._revbranchcache:
578 self._revbranchcache.write()
578 self._revbranchcache.write()
579
579
580 def _restrictcapabilities(self, caps):
580 def _restrictcapabilities(self, caps):
581 if self.ui.configbool('experimental', 'bundle2-advertise'):
581 if self.ui.configbool('experimental', 'bundle2-advertise'):
582 caps = set(caps)
582 caps = set(caps)
583 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
583 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
584 caps.add('bundle2=' + urlreq.quote(capsblob))
584 caps.add('bundle2=' + urlreq.quote(capsblob))
585 return caps
585 return caps
586
586
587 def _applyopenerreqs(self):
587 def _applyopenerreqs(self):
588 self.svfs.options = dict((r, 1) for r in self.requirements
588 self.svfs.options = dict((r, 1) for r in self.requirements
589 if r in self.openerreqs)
589 if r in self.openerreqs)
590 # experimental config: format.chunkcachesize
590 # experimental config: format.chunkcachesize
591 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
591 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
592 if chunkcachesize is not None:
592 if chunkcachesize is not None:
593 self.svfs.options['chunkcachesize'] = chunkcachesize
593 self.svfs.options['chunkcachesize'] = chunkcachesize
594 # experimental config: format.maxchainlen
594 # experimental config: format.maxchainlen
595 maxchainlen = self.ui.configint('format', 'maxchainlen')
595 maxchainlen = self.ui.configint('format', 'maxchainlen')
596 if maxchainlen is not None:
596 if maxchainlen is not None:
597 self.svfs.options['maxchainlen'] = maxchainlen
597 self.svfs.options['maxchainlen'] = maxchainlen
598 # experimental config: format.manifestcachesize
598 # experimental config: format.manifestcachesize
599 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
599 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
600 if manifestcachesize is not None:
600 if manifestcachesize is not None:
601 self.svfs.options['manifestcachesize'] = manifestcachesize
601 self.svfs.options['manifestcachesize'] = manifestcachesize
602 # experimental config: format.aggressivemergedeltas
602 # experimental config: format.aggressivemergedeltas
603 aggressivemergedeltas = self.ui.configbool('format',
603 aggressivemergedeltas = self.ui.configbool('format',
604 'aggressivemergedeltas')
604 'aggressivemergedeltas')
605 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
605 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
606 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
606 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
607 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
607 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
608 if 0 <= chainspan:
608 if 0 <= chainspan:
609 self.svfs.options['maxdeltachainspan'] = chainspan
609 self.svfs.options['maxdeltachainspan'] = chainspan
610 mmapindexthreshold = self.ui.configbytes('experimental',
610 mmapindexthreshold = self.ui.configbytes('experimental',
611 'mmapindexthreshold')
611 'mmapindexthreshold')
612 if mmapindexthreshold is not None:
612 if mmapindexthreshold is not None:
613 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
613 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
614 withsparseread = self.ui.configbool('experimental', 'sparse-read')
614 withsparseread = self.ui.configbool('experimental', 'sparse-read')
615 srdensitythres = float(self.ui.config('experimental',
615 srdensitythres = float(self.ui.config('experimental',
616 'sparse-read.density-threshold'))
616 'sparse-read.density-threshold'))
617 srmingapsize = self.ui.configbytes('experimental',
617 srmingapsize = self.ui.configbytes('experimental',
618 'sparse-read.min-gap-size')
618 'sparse-read.min-gap-size')
619 self.svfs.options['with-sparse-read'] = withsparseread
619 self.svfs.options['with-sparse-read'] = withsparseread
620 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
620 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
621 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
621 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
622
622
623 for r in self.requirements:
623 for r in self.requirements:
624 if r.startswith('exp-compression-'):
624 if r.startswith('exp-compression-'):
625 self.svfs.options['compengine'] = r[len('exp-compression-'):]
625 self.svfs.options['compengine'] = r[len('exp-compression-'):]
626
626
627 # TODO move "revlogv2" to openerreqs once finalized.
627 # TODO move "revlogv2" to openerreqs once finalized.
628 if REVLOGV2_REQUIREMENT in self.requirements:
628 if REVLOGV2_REQUIREMENT in self.requirements:
629 self.svfs.options['revlogv2'] = True
629 self.svfs.options['revlogv2'] = True
630
630
631 def _writerequirements(self):
631 def _writerequirements(self):
632 scmutil.writerequires(self.vfs, self.requirements)
632 scmutil.writerequires(self.vfs, self.requirements)
633
633
634 def _checknested(self, path):
634 def _checknested(self, path):
635 """Determine if path is a legal nested repository."""
635 """Determine if path is a legal nested repository."""
636 if not path.startswith(self.root):
636 if not path.startswith(self.root):
637 return False
637 return False
638 subpath = path[len(self.root) + 1:]
638 subpath = path[len(self.root) + 1:]
639 normsubpath = util.pconvert(subpath)
639 normsubpath = util.pconvert(subpath)
640
640
641 # XXX: Checking against the current working copy is wrong in
641 # XXX: Checking against the current working copy is wrong in
642 # the sense that it can reject things like
642 # the sense that it can reject things like
643 #
643 #
644 # $ hg cat -r 10 sub/x.txt
644 # $ hg cat -r 10 sub/x.txt
645 #
645 #
646 # if sub/ is no longer a subrepository in the working copy
646 # if sub/ is no longer a subrepository in the working copy
647 # parent revision.
647 # parent revision.
648 #
648 #
649 # However, it can of course also allow things that would have
649 # However, it can of course also allow things that would have
650 # been rejected before, such as the above cat command if sub/
650 # been rejected before, such as the above cat command if sub/
651 # is a subrepository now, but was a normal directory before.
651 # is a subrepository now, but was a normal directory before.
652 # The old path auditor would have rejected by mistake since it
652 # The old path auditor would have rejected by mistake since it
653 # panics when it sees sub/.hg/.
653 # panics when it sees sub/.hg/.
654 #
654 #
655 # All in all, checking against the working copy seems sensible
655 # All in all, checking against the working copy seems sensible
656 # since we want to prevent access to nested repositories on
656 # since we want to prevent access to nested repositories on
657 # the filesystem *now*.
657 # the filesystem *now*.
658 ctx = self[None]
658 ctx = self[None]
659 parts = util.splitpath(subpath)
659 parts = util.splitpath(subpath)
660 while parts:
660 while parts:
661 prefix = '/'.join(parts)
661 prefix = '/'.join(parts)
662 if prefix in ctx.substate:
662 if prefix in ctx.substate:
663 if prefix == normsubpath:
663 if prefix == normsubpath:
664 return True
664 return True
665 else:
665 else:
666 sub = ctx.sub(prefix)
666 sub = ctx.sub(prefix)
667 return sub.checknested(subpath[len(prefix) + 1:])
667 return sub.checknested(subpath[len(prefix) + 1:])
668 else:
668 else:
669 parts.pop()
669 parts.pop()
670 return False
670 return False
671
671
672 def peer(self):
672 def peer(self):
673 return localpeer(self) # not cached to avoid reference cycle
673 return localpeer(self) # not cached to avoid reference cycle
674
674
675 def unfiltered(self):
675 def unfiltered(self):
676 """Return unfiltered version of the repository
676 """Return unfiltered version of the repository
677
677
678 Intended to be overwritten by filtered repo."""
678 Intended to be overwritten by filtered repo."""
679 return self
679 return self
680
680
681 def filtered(self, name):
681 def filtered(self, name):
682 """Return a filtered version of a repository"""
682 """Return a filtered version of a repository"""
683 # Python <3.4 easily leaks types via __mro__. See
683 # Python <3.4 easily leaks types via __mro__. See
684 # https://bugs.python.org/issue17950. We cache dynamically
684 # https://bugs.python.org/issue17950. We cache dynamically
685 # created types so this method doesn't leak on every
685 # created types so this method doesn't leak on every
686 # invocation.
686 # invocation.
687
687
688 key = self.unfiltered().__class__
688 key = self.unfiltered().__class__
689 if key not in self._filteredrepotypes:
689 if key not in self._filteredrepotypes:
690 # Build a new type with the repoview mixin and the base
690 # Build a new type with the repoview mixin and the base
691 # class of this repo. Give it a name containing the
691 # class of this repo.
692 # filter name to aid debugging.
692 class filteredrepo(repoview.repoview, key):
693 bases = (repoview.repoview, key)
693 pass
694 cls = type(r'%sfilteredrepo' % name, bases, {})
694 self._filteredrepotypes[key] = filteredrepo
695 self._filteredrepotypes[key] = cls
696
695
697 return self._filteredrepotypes[key](self, name)
696 return self._filteredrepotypes[key](self, name)
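The hunk above is the substance of this changeset. The proxy type is cached per unfiltered base class, so under the old code the type's name baked in whichever filter happened to be requested first; dropping the name from the dynamically created class removes that misleading artifact. A sketch of the difference, with repo setup elided and the behaviour inferred from the hunk above:

```python
# Old code: the cache below is keyed on the base class, not the filter
# name, so the first name seen sticks to the shared type.
repo.filtered('visible')             # creates <class 'visiblefilteredrepo'>
repo.filtered('served').__class__    # cache hit: still 'visiblefilteredrepo'

# New code: every view shares one honestly named 'filteredrepo' type;
# the filter name is still carried per instance (cf. self.filtername).
repo.filtered('served').__class__    # <class 'filteredrepo'>
```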
698
697
699 @repofilecache('bookmarks', 'bookmarks.current')
698 @repofilecache('bookmarks', 'bookmarks.current')
700 def _bookmarks(self):
699 def _bookmarks(self):
701 return bookmarks.bmstore(self)
700 return bookmarks.bmstore(self)
702
701
703 @property
702 @property
704 def _activebookmark(self):
703 def _activebookmark(self):
705 return self._bookmarks.active
704 return self._bookmarks.active
706
705
707 # _phaserevs and _phasesets depend on changelog. What we need is to
706 # _phaserevs and _phasesets depend on changelog. What we need is to
708 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
707 # call _phasecache.invalidate() if '00changelog.i' was changed, but it
709 # can't be easily expressed in the filecache mechanism.
708 # can't be easily expressed in the filecache mechanism.
710 @storecache('phaseroots', '00changelog.i')
709 @storecache('phaseroots', '00changelog.i')
711 def _phasecache(self):
710 def _phasecache(self):
712 return phases.phasecache(self, self._phasedefaults)
711 return phases.phasecache(self, self._phasedefaults)
713
712
714 @storecache('obsstore')
713 @storecache('obsstore')
715 def obsstore(self):
714 def obsstore(self):
716 return obsolete.makestore(self.ui, self)
715 return obsolete.makestore(self.ui, self)
717
716
718 @storecache('00changelog.i')
717 @storecache('00changelog.i')
719 def changelog(self):
718 def changelog(self):
720 return changelog.changelog(self.svfs,
719 return changelog.changelog(self.svfs,
721 trypending=txnutil.mayhavepending(self.root))
720 trypending=txnutil.mayhavepending(self.root))
722
721
723 def _constructmanifest(self):
722 def _constructmanifest(self):
724 # This is a temporary function while we migrate from manifest to
723 # This is a temporary function while we migrate from manifest to
725 # manifestlog. It allows bundlerepo and unionrepo to intercept the
724 # manifestlog. It allows bundlerepo and unionrepo to intercept the
726 # manifest creation.
725 # manifest creation.
727 return manifest.manifestrevlog(self.svfs)
726 return manifest.manifestrevlog(self.svfs)
728
727
729 @storecache('00manifest.i')
728 @storecache('00manifest.i')
730 def manifestlog(self):
729 def manifestlog(self):
731 return manifest.manifestlog(self.svfs, self)
730 return manifest.manifestlog(self.svfs, self)
732
731
733 @repofilecache('dirstate')
732 @repofilecache('dirstate')
734 def dirstate(self):
733 def dirstate(self):
735 sparsematchfn = lambda: sparse.matcher(self)
734 sparsematchfn = lambda: sparse.matcher(self)
736
735
737 return dirstate.dirstate(self.vfs, self.ui, self.root,
736 return dirstate.dirstate(self.vfs, self.ui, self.root,
738 self._dirstatevalidate, sparsematchfn)
737 self._dirstatevalidate, sparsematchfn)
739
738
740 def _dirstatevalidate(self, node):
739 def _dirstatevalidate(self, node):
741 try:
740 try:
742 self.changelog.rev(node)
741 self.changelog.rev(node)
743 return node
742 return node
744 except error.LookupError:
743 except error.LookupError:
745 if not self._dirstatevalidatewarned:
744 if not self._dirstatevalidatewarned:
746 self._dirstatevalidatewarned = True
745 self._dirstatevalidatewarned = True
747 self.ui.warn(_("warning: ignoring unknown"
746 self.ui.warn(_("warning: ignoring unknown"
748 " working parent %s!\n") % short(node))
747 " working parent %s!\n") % short(node))
749 return nullid
748 return nullid
750
749
751 def __getitem__(self, changeid):
750 def __getitem__(self, changeid):
752 if changeid is None:
751 if changeid is None:
753 return context.workingctx(self)
752 return context.workingctx(self)
754 if isinstance(changeid, slice):
753 if isinstance(changeid, slice):
755 # wdirrev isn't contiguous so the slice shouldn't include it
754 # wdirrev isn't contiguous so the slice shouldn't include it
756 return [context.changectx(self, i)
755 return [context.changectx(self, i)
757 for i in xrange(*changeid.indices(len(self)))
756 for i in xrange(*changeid.indices(len(self)))
758 if i not in self.changelog.filteredrevs]
757 if i not in self.changelog.filteredrevs]
759 try:
758 try:
760 return context.changectx(self, changeid)
759 return context.changectx(self, changeid)
761 except error.WdirUnsupported:
760 except error.WdirUnsupported:
762 return context.workingctx(self)
761 return context.workingctx(self)
763
762
764 def __contains__(self, changeid):
763 def __contains__(self, changeid):
765 """True if the given changeid exists
764 """True if the given changeid exists
766
765
767 error.LookupError is raised if an ambiguous node is specified.
766 error.LookupError is raised if an ambiguous node is specified.
768 """
767 """
769 try:
768 try:
770 self[changeid]
769 self[changeid]
771 return True
770 return True
772 except error.RepoLookupError:
771 except error.RepoLookupError:
773 return False
772 return False
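Taken together with `__getitem__`, this gives the repository a dict-like interface; a short sketch (the changeids are illustrative):

```python
wctx = repo[None]        # workingctx for the working directory
tipctx = repo['tip']     # changectx looked up by symbol, rev number, or node
first = repo[0:5]        # slice -> list of changectx, filtered revs skipped
exists = '.' in repo     # __contains__: False on error.RepoLookupError
```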
774
773
775 def __nonzero__(self):
774 def __nonzero__(self):
776 return True
775 return True
777
776
778 __bool__ = __nonzero__
777 __bool__ = __nonzero__
779
778
780 def __len__(self):
779 def __len__(self):
781 return len(self.changelog)
780 return len(self.changelog)
782
781
783 def __iter__(self):
782 def __iter__(self):
784 return iter(self.changelog)
783 return iter(self.changelog)
785
784
786 def revs(self, expr, *args):
785 def revs(self, expr, *args):
787 '''Find revisions matching a revset.
786 '''Find revisions matching a revset.
788
787
789 The revset is specified as a string ``expr`` that may contain
788 The revset is specified as a string ``expr`` that may contain
790 %-formatting to escape certain types. See ``revsetlang.formatspec``.
789 %-formatting to escape certain types. See ``revsetlang.formatspec``.
791
790
792 Revset aliases from the configuration are not expanded. To expand
791 Revset aliases from the configuration are not expanded. To expand
793 user aliases, consider calling ``scmutil.revrange()`` or
792 user aliases, consider calling ``scmutil.revrange()`` or
794 ``repo.anyrevs([expr], user=True)``.
793 ``repo.anyrevs([expr], user=True)``.
795
794
796 Returns a revset.abstractsmartset, which is a list-like interface
795 Returns a revset.abstractsmartset, which is a list-like interface
797 that contains integer revisions.
796 that contains integer revisions.
798 '''
797 '''
799 expr = revsetlang.formatspec(expr, *args)
798 expr = revsetlang.formatspec(expr, *args)
800 m = revset.match(None, expr)
799 m = revset.match(None, expr)
801 return m(self)
800 return m(self)
802
801
803 def set(self, expr, *args):
802 def set(self, expr, *args):
804 '''Find revisions matching a revset and emit changectx instances.
803 '''Find revisions matching a revset and emit changectx instances.
805
804
806 This is a convenience wrapper around ``revs()`` that iterates the
805 This is a convenience wrapper around ``revs()`` that iterates the
807 result and is a generator of changectx instances.
806 result and is a generator of changectx instances.
808
807
809 Revset aliases from the configuration are not expanded. To expand
808 Revset aliases from the configuration are not expanded. To expand
810 user aliases, consider calling ``scmutil.revrange()``.
809 user aliases, consider calling ``scmutil.revrange()``.
811 '''
810 '''
812 for r in self.revs(expr, *args):
811 for r in self.revs(expr, *args):
813 yield self[r]
812 yield self[r]
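A hedged usage sketch for `revs()` and `set()`; the revset strings and predicates are illustrative, and the `%d`/`%s` escapes follow `revsetlang.formatspec` as referenced in the docstring above:

```python
# revs() yields integer revisions via a smartset...
for rev in repo.revs('ancestors(%d) and not public()', 42):
    print(rev)

# ...while set() wraps the same query and emits changectx instances.
for ctx in repo.set('branch(%s)', 'default'):
    print(ctx.hex())
```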
814
813
815 def anyrevs(self, specs, user=False, localalias=None):
814 def anyrevs(self, specs, user=False, localalias=None):
816 '''Find revisions matching one of the given revsets.
815 '''Find revisions matching one of the given revsets.
817
816
818 Revset aliases from the configuration are not expanded by default. To
817 Revset aliases from the configuration are not expanded by default. To
819 expand user aliases, specify ``user=True``. To provide some local
818 expand user aliases, specify ``user=True``. To provide some local
820 definitions overriding user aliases, set ``localalias`` to
819 definitions overriding user aliases, set ``localalias`` to
821 ``{name: definitionstring}``.
820 ``{name: definitionstring}``.
822 '''
821 '''
823 if user:
822 if user:
824 m = revset.matchany(self.ui, specs, repo=self,
823 m = revset.matchany(self.ui, specs, repo=self,
825 localalias=localalias)
824 localalias=localalias)
826 else:
825 else:
827 m = revset.matchany(None, specs, localalias=localalias)
826 m = revset.matchany(None, specs, localalias=localalias)
828 return m(self)
827 return m(self)
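And a sketch of `anyrevs()` with a local alias override, following the docstring's `{name: definitionstring}` convention; the alias name and definition are invented:

```python
# 'releases' is a hypothetical revset alias; the local definition wins
# over any [revsetalias] entry of the same name in the user's config.
revs = repo.anyrevs(['releases()'], user=True,
                    localalias={'releases': 'tag()'})
```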
829
828
830 def url(self):
829 def url(self):
831 return 'file:' + self.root
830 return 'file:' + self.root
832
831
833 def hook(self, name, throw=False, **args):
832 def hook(self, name, throw=False, **args):
834 """Call a hook, passing this repo instance.
833 """Call a hook, passing this repo instance.
835
834
836 This a convenience method to aid invoking hooks. Extensions likely
835 This a convenience method to aid invoking hooks. Extensions likely
837 won't call this unless they have registered a custom hook or are
836 won't call this unless they have registered a custom hook or are
838 replacing code that is expected to call a hook.
837 replacing code that is expected to call a hook.
839 """
838 """
840 return hook.hook(self.ui, self, name, throw, **args)
839 return hook.hook(self.ui, self, name, throw, **args)
841
840
842 @filteredpropertycache
841 @filteredpropertycache
843 def _tagscache(self):
842 def _tagscache(self):
844 '''Returns a tagscache object that contains various tags related
843 '''Returns a tagscache object that contains various tags related
845 caches.'''
844 caches.'''
846
845
847 # This simplifies its cache management by having one decorated
846 # This simplifies its cache management by having one decorated
848 # function (this one) and the rest simply fetch things from it.
847 # function (this one) and the rest simply fetch things from it.
849 class tagscache(object):
848 class tagscache(object):
850 def __init__(self):
849 def __init__(self):
851 # These two define the set of tags for this repository. tags
850 # These two define the set of tags for this repository. tags
852 # maps tag name to node; tagtypes maps tag name to 'global' or
851 # maps tag name to node; tagtypes maps tag name to 'global' or
853 # 'local'. (Global tags are defined by .hgtags across all
852 # 'local'. (Global tags are defined by .hgtags across all
854 # heads, and local tags are defined in .hg/localtags.)
853 # heads, and local tags are defined in .hg/localtags.)
855 # They constitute the in-memory cache of tags.
854 # They constitute the in-memory cache of tags.
856 self.tags = self.tagtypes = None
855 self.tags = self.tagtypes = None
857
856
858 self.nodetagscache = self.tagslist = None
857 self.nodetagscache = self.tagslist = None
859
858
860 cache = tagscache()
859 cache = tagscache()
861 cache.tags, cache.tagtypes = self._findtags()
860 cache.tags, cache.tagtypes = self._findtags()
862
861
863 return cache
862 return cache
864
863
865 def tags(self):
864 def tags(self):
866 '''return a mapping of tag to node'''
865 '''return a mapping of tag to node'''
867 t = {}
866 t = {}
868 if self.changelog.filteredrevs:
867 if self.changelog.filteredrevs:
869 tags, tt = self._findtags()
868 tags, tt = self._findtags()
870 else:
869 else:
871 tags = self._tagscache.tags
870 tags = self._tagscache.tags
872 for k, v in tags.iteritems():
871 for k, v in tags.iteritems():
873 try:
872 try:
874 # ignore tags to unknown nodes
873 # ignore tags to unknown nodes
875 self.changelog.rev(v)
874 self.changelog.rev(v)
876 t[k] = v
875 t[k] = v
877 except (error.LookupError, ValueError):
876 except (error.LookupError, ValueError):
878 pass
877 pass
879 return t
878 return t
880
879
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

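    # Usage sketch (illustrative; 'stable' is a hypothetical branch name):
    # with ignoremissing=True a missing branch yields None instead of raising
    # RepoLookupError:
    #
    #   if repo.branchtip('stable', ignoremissing=True) is None:
    #       ...  # no such branch in this repository
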
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

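    # Usage sketch (illustrative; the node names are hypothetical): known()
    # maps a list of binary node ids to booleans, treating filtered
    # revisions as unknown:
    #
    #   repo.known([presentnode, missingnode])  # -> [True, False]
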
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it itself, as
                # it requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

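    # For reference (a hedged sketch based on the hgrc documentation, not
    # part of this file): the pattern/command pairs read by _loadfilter come
    # from config sections such as
    #
    #   [encode]
    #   *.txt = tempfile: unix2dos -n INFILE OUTFILE
    #
    #   [decode]
    #   *.txt = tempfile: dos2unix -n INFILE OUTFILE
    #
    # A command value of '!' disables filtering for that pattern (skipped in
    # the loop above); a command prefix matching a name registered through
    # adddatafilter() selects that filter function instead of util.filter.
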
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

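    # Note (illustrative, with hypothetical file names): the flags argument
    # of wwrite() is the manifest flag string, where 'l' writes the data as a
    # symlink target and 'x' marks the file executable:
    #
    #   repo.wwrite('bin/run.sh', data, 'x')    # regular executable file
    #   repo.wwrite('current', 'target', 'l')   # symlink pointing at 'target'
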
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            scmutil.registersummarycallback(self, tr, desc)
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup
        # application, but that fails to cope with the case where a
        # transaction exposes new heads without a changegroup being involved
        # (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
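        #
        # An illustrative entry pair for a moved tag (hypothetical, shortened
        # nodes; the file records full 40-character hex nodes):
        #
        #   -M 6d3a1bb76f35 v1.2
        #   +M a21f2cbd0266 v1.2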
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here. As we do it only once,
                # building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file with the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles)
        tr.changes['revs'] = set()
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

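    # Usage sketch (illustrative, not part of the original module): callers
    # take the store lock first and then open a transaction:
    #
    #   with repo.lock():
    #       tr = repo.transaction('my-operation')
    #       try:
    #           ...  # mutate the store
    #           tr.close()
    #       finally:
    #           tr.release()
    #
    # A nested transaction() call returns tr.nest() on the already-running
    # transaction, so only the outermost close()/release() pair actually
    # commits or aborts.
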
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

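    # Note (derived from the code above): when a transaction closes
    # successfully, aftertrans(renames) moves each journal file listed by
    # _journalfiles() to its undoname() counterpart; undofiles() enumerates
    # those, and rollback()/_rollback() below consume them.
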
    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction close, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

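    # Usage sketch (illustrative): txnclosehook above defers hook execution
    # with this helper so that 'txnclose' hooks only fire once every lock is
    # released:
    #
    #   repo._afterlock(hookfunc)   # runs hookfunc immediately if unlocked
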
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisitions. Such
        # acquisitions would not cause a dead-lock; they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

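    # Ordering note (illustrative): per the docstrings above, code that needs
    # both locks must take wlock before lock:
    #
    #   with repo.wlock():
    #       with repo.lock():
    #           ...  # safe: matches the documented acquisition order
    #
    # Acquiring them the other way round triggers the develwarn above when
    # devel.check-locks is enabled.
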
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to
            # find where the file copy came from if the source of a copy was
            # not in the parent directory. However, this doesn't actually
            # make sense to do (what does a copy from something not in your
            # working copy even mean?) and it causes bugs (e.g. issue4476).
            # Instead, we warn the user that copy information was dropped,
            # so if they didn't expect this outcome it can be fixed, but
            # this is the correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

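    # For illustration: after 'hg mv foo bar' followed by an edit and commit,
    # the filelog entry written above for 'bar' carries copy metadata of the
    # form below ('anc' standing for the revision of 'foo' recorded in the
    # first parent manifest), with fparent1 set to nullid so readers know to
    # look up the copy source:
    #
    #   meta = {'copy': 'foo', 'copyrev': hex(anc)}
    #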
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepo.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save the commit message in case this transaction gets rolled
            # back (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped by the
            # time the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

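    # Illustrative sketch (not part of this class): how a caller, e.g. an
    # extension command holding no locks yet, typically drives commit();
    # the exact matcher narrows the commit to the named file.
    #
    #   def commitone(repo, path, message):
    #       m = matchmod.match(repo.root, '', [path], exact=True)
    #       return repo.commit(text=message, match=m)
    #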
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets; if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

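    # Illustrative sketch (not part of this class): commitctx() is also the
    # entry point for creating a changeset without touching the working
    # directory, by feeding it an in-memory context (the convert extension
    # commits this way):
    #
    #   def memcommit(repo, path, data, message, user):
    #       def getfilectx(repo, memctx, p):
    #           return context.memfilectx(repo, p, data)
    #       p1 = repo['.'].node()
    #       mctx = context.memctx(repo, (p1, nullid), message,
    #                             [path], getfilectx, user=user)
    #       return repo.commitctx(mctx)
    #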
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes either to stay in memory (waiting for the next unlock) or to
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which
        status fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure to add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

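    # Illustrative sketch (not part of this class): how an extension such as
    # fsmonitor registers a fixup callback before triggering status; the
    # callback fires once, under wlock, and the list is then cleared.
    #
    #   def setupstatus(repo):
    #       def poststatus(wctx, status):
    #           repo.ui.debug('status fixups saw %d modified files\n'
    #                         % len(status.modified))
    #       repo.addpostdsstatus(poststatus)
    #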
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            # walk back along first parents until a merge or the root is
            # reached, recording (tip, base, p1, p2) for the linear run
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            # walk first parents from top towards bottom, keeping the
            # nodes at exponentially growing distances (1, 2, 4, 8, ...)
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

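    # The loop above keeps ancestors of 'top' at exponentially growing
    # first-parent distances until 'bottom' or the root is reached. The same
    # sampling over plain integers, as a self-contained sketch ('n - 1'
    # standing in for "follow the first parent"):
    #
    #   def spacing(top, bottom):
    #       n, l, i, f = top, [], 0, 1
    #       while n != bottom and n > 0:
    #           if i == f:
    #               l.append(n)
    #               f *= 2
    #           n -= 1
    #           i += 1
    #       return l  # e.g. spacing(10, 0) -> [9, 8, 6, 2]
    #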
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object whose hooks are called before pushing
        changesets; each hook receives a pushop carrying repo, remote and
        outgoing attributes.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

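    # Illustrative sketch (not part of this class): bookmarks are one of the
    # standard pushkey namespaces, so a bookmark can be published through
    # this generic dispatch exactly as the wire protocol does ('node' being
    # a binary node already known to the repo):
    #
    #   repo.pushkey('bookmarks', name, '', hex(node))
    #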
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2'):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
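
# Illustrative sketch (not part of this module): the wrapping hook named in
# the docstring above, as it would appear in an extension's uisetup(); the
# 'exp-myfeature' requirement name is hypothetical.
#
#   def uisetup(ui):
#       def wrapper(orig, repo):
#           requirements = orig(repo)
#           requirements.add('exp-myfeature')
#           return requirements
#       extensions.wrapfunction(localrepo, 'newreporequirements', wrapper)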