localrepo: micro-optimize __len__() to bypass repoview...
Yuya Nishihara
r35754:29f57ce4 default
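In short: ``len(repo)`` used to go through the ``changelog`` property, which on a filtered repoview revalidates its view of the changelog on every attribute access. The total revision count does not depend on filtering (the changelog index keeps hidden revisions and only skips them during iteration and lookup), so ``__len__`` can take its answer from the unfiltered repository and skip that per-access cost. A minimal self-contained sketch of the pattern (toy classes standing in for repoview, not Mercurial's real API):

    # Toy illustration only: 'changelog' mimics a repoview property that
    # does validation work on every access; '__len__' bypasses it.
    class toyrepo(object):
        def __init__(self, nrevs):
            self._cl = list(range(nrevs))  # stands in for the changelog index

        def unfiltered(self):
            return self

        @property
        def changelog(self):
            # a real repoview would re-check its filteredrevs cache here
            return list(self._cl)

        def __len__(self):
            unfi = self.unfiltered()       # same shape as the new code below
            return len(unfi._cl)

    assert len(toyrepo(3)) == 3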
@@ -1,2276 +1,2278 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import inspect
12 import inspect
13 import os
13 import os
14 import random
14 import random
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 hex,
20 hex,
21 nullid,
21 nullid,
22 short,
22 short,
23 )
23 )
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 branchmap,
26 branchmap,
27 bundle2,
27 bundle2,
28 changegroup,
28 changegroup,
29 changelog,
29 changelog,
30 color,
30 color,
31 context,
31 context,
32 dirstate,
32 dirstate,
33 dirstateguard,
33 dirstateguard,
34 discovery,
34 discovery,
35 encoding,
35 encoding,
36 error,
36 error,
37 exchange,
37 exchange,
38 extensions,
38 extensions,
39 filelog,
39 filelog,
40 hook,
40 hook,
41 lock as lockmod,
41 lock as lockmod,
42 manifest,
42 manifest,
43 match as matchmod,
43 match as matchmod,
44 merge as mergemod,
44 merge as mergemod,
45 mergeutil,
45 mergeutil,
46 namespaces,
46 namespaces,
47 obsolete,
47 obsolete,
48 pathutil,
48 pathutil,
49 peer,
49 peer,
50 phases,
50 phases,
51 pushkey,
51 pushkey,
52 pycompat,
52 pycompat,
53 repository,
53 repository,
54 repoview,
54 repoview,
55 revset,
55 revset,
56 revsetlang,
56 revsetlang,
57 scmutil,
57 scmutil,
58 sparse,
58 sparse,
59 store,
59 store,
60 subrepo,
60 subrepo,
61 tags as tagsmod,
61 tags as tagsmod,
62 transaction,
62 transaction,
63 txnutil,
63 txnutil,
64 util,
64 util,
65 vfs as vfsmod,
65 vfs as vfsmod,
66 )
66 )
67
67
68 release = lockmod.release
68 release = lockmod.release
69 urlerr = util.urlerr
69 urlerr = util.urlerr
70 urlreq = util.urlreq
70 urlreq = util.urlreq
71
71
72 # set of (path, vfs-location) tuples. vfs-location is:
72 # set of (path, vfs-location) tuples. vfs-location is:
73 # - 'plain for vfs relative paths
73 # - 'plain for vfs relative paths
74 # - '' for svfs relative paths
74 # - '' for svfs relative paths
75 _cachedfiles = set()
75 _cachedfiles = set()
76
76
77 class _basefilecache(scmutil.filecache):
77 class _basefilecache(scmutil.filecache):
78 """All filecache usage on repo are done for logic that should be unfiltered
78 """All filecache usage on repo are done for logic that should be unfiltered
79 """
79 """
80 def __get__(self, repo, type=None):
80 def __get__(self, repo, type=None):
81 if repo is None:
81 if repo is None:
82 return self
82 return self
83 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
83 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
84 def __set__(self, repo, value):
84 def __set__(self, repo, value):
85 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
85 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
86 def __delete__(self, repo):
86 def __delete__(self, repo):
87 return super(_basefilecache, self).__delete__(repo.unfiltered())
87 return super(_basefilecache, self).__delete__(repo.unfiltered())
88
88
89 class repofilecache(_basefilecache):
89 class repofilecache(_basefilecache):
90 """filecache for files in .hg but outside of .hg/store"""
90 """filecache for files in .hg but outside of .hg/store"""
91 def __init__(self, *paths):
91 def __init__(self, *paths):
92 super(repofilecache, self).__init__(*paths)
92 super(repofilecache, self).__init__(*paths)
93 for path in paths:
93 for path in paths:
94 _cachedfiles.add((path, 'plain'))
94 _cachedfiles.add((path, 'plain'))
95
95
96 def join(self, obj, fname):
96 def join(self, obj, fname):
97 return obj.vfs.join(fname)
97 return obj.vfs.join(fname)
98
98
99 class storecache(_basefilecache):
99 class storecache(_basefilecache):
100 """filecache for files in the store"""
100 """filecache for files in the store"""
101 def __init__(self, *paths):
101 def __init__(self, *paths):
102 super(storecache, self).__init__(*paths)
102 super(storecache, self).__init__(*paths)
103 for path in paths:
103 for path in paths:
104 _cachedfiles.add((path, ''))
104 _cachedfiles.add((path, ''))
105
105
106 def join(self, obj, fname):
106 def join(self, obj, fname):
107 return obj.sjoin(fname)
107 return obj.sjoin(fname)
108
108
109 def isfilecached(repo, name):
109 def isfilecached(repo, name):
110 """check if a repo has already cached "name" filecache-ed property
110 """check if a repo has already cached "name" filecache-ed property
111
111
112 This returns (cachedobj-or-None, iscached) tuple.
112 This returns (cachedobj-or-None, iscached) tuple.
113 """
113 """
114 cacheentry = repo.unfiltered()._filecache.get(name, None)
114 cacheentry = repo.unfiltered()._filecache.get(name, None)
115 if not cacheentry:
115 if not cacheentry:
116 return None, False
116 return None, False
117 return cacheentry.obj, True
117 return cacheentry.obj, True
118
118
119 class unfilteredpropertycache(util.propertycache):
119 class unfilteredpropertycache(util.propertycache):
120 """propertycache that apply to unfiltered repo only"""
120 """propertycache that apply to unfiltered repo only"""
121
121
122 def __get__(self, repo, type=None):
122 def __get__(self, repo, type=None):
123 unfi = repo.unfiltered()
123 unfi = repo.unfiltered()
124 if unfi is repo:
124 if unfi is repo:
125 return super(unfilteredpropertycache, self).__get__(unfi)
125 return super(unfilteredpropertycache, self).__get__(unfi)
126 return getattr(unfi, self.name)
126 return getattr(unfi, self.name)
127
127
128 class filteredpropertycache(util.propertycache):
128 class filteredpropertycache(util.propertycache):
129 """propertycache that must take filtering in account"""
129 """propertycache that must take filtering in account"""
130
130
131 def cachevalue(self, obj, value):
131 def cachevalue(self, obj, value):
132 object.__setattr__(obj, self.name, value)
132 object.__setattr__(obj, self.name, value)
133
133
134
134
135 def hasunfilteredcache(repo, name):
135 def hasunfilteredcache(repo, name):
136 """check if a repo has an unfilteredpropertycache value for <name>"""
136 """check if a repo has an unfilteredpropertycache value for <name>"""
137 return name in vars(repo.unfiltered())
137 return name in vars(repo.unfiltered())
138
138
139 def unfilteredmethod(orig):
139 def unfilteredmethod(orig):
140 """decorate method that always need to be run on unfiltered version"""
140 """decorate method that always need to be run on unfiltered version"""
141 def wrapper(repo, *args, **kwargs):
141 def wrapper(repo, *args, **kwargs):
142 return orig(repo.unfiltered(), *args, **kwargs)
142 return orig(repo.unfiltered(), *args, **kwargs)
143 return wrapper
143 return wrapper
144
144
145 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
145 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
146 'unbundle'}
146 'unbundle'}
147 legacycaps = moderncaps.union({'changegroupsubset'})
147 legacycaps = moderncaps.union({'changegroupsubset'})
148
148
149 class localpeer(repository.peer):
149 class localpeer(repository.peer):
150 '''peer for a local repo; reflects only the most recent API'''
150 '''peer for a local repo; reflects only the most recent API'''
151
151
152 def __init__(self, repo, caps=None):
152 def __init__(self, repo, caps=None):
153 super(localpeer, self).__init__()
153 super(localpeer, self).__init__()
154
154
155 if caps is None:
155 if caps is None:
156 caps = moderncaps.copy()
156 caps = moderncaps.copy()
157 self._repo = repo.filtered('served')
157 self._repo = repo.filtered('served')
158 self._ui = repo.ui
158 self._ui = repo.ui
159 self._caps = repo._restrictcapabilities(caps)
159 self._caps = repo._restrictcapabilities(caps)
160
160
161 # Begin of _basepeer interface.
161 # Begin of _basepeer interface.
162
162
163 @util.propertycache
163 @util.propertycache
164 def ui(self):
164 def ui(self):
165 return self._ui
165 return self._ui
166
166
167 def url(self):
167 def url(self):
168 return self._repo.url()
168 return self._repo.url()
169
169
170 def local(self):
170 def local(self):
171 return self._repo
171 return self._repo
172
172
173 def peer(self):
173 def peer(self):
174 return self
174 return self
175
175
176 def canpush(self):
176 def canpush(self):
177 return True
177 return True
178
178
179 def close(self):
179 def close(self):
180 self._repo.close()
180 self._repo.close()
181
181
182 # End of _basepeer interface.
182 # End of _basepeer interface.
183
183
184 # Begin of _basewirecommands interface.
184 # Begin of _basewirecommands interface.
185
185
186 def branchmap(self):
186 def branchmap(self):
187 return self._repo.branchmap()
187 return self._repo.branchmap()
188
188
189 def capabilities(self):
189 def capabilities(self):
190 return self._caps
190 return self._caps
191
191
192 def debugwireargs(self, one, two, three=None, four=None, five=None):
192 def debugwireargs(self, one, two, three=None, four=None, five=None):
193 """Used to test argument passing over the wire"""
193 """Used to test argument passing over the wire"""
194 return "%s %s %s %s %s" % (one, two, three, four, five)
194 return "%s %s %s %s %s" % (one, two, three, four, five)
195
195
196 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
196 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
197 **kwargs):
197 **kwargs):
198 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
198 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
199 common=common, bundlecaps=bundlecaps,
199 common=common, bundlecaps=bundlecaps,
200 **kwargs)
200 **kwargs)
201 cb = util.chunkbuffer(chunks)
201 cb = util.chunkbuffer(chunks)
202
202
203 if exchange.bundle2requested(bundlecaps):
203 if exchange.bundle2requested(bundlecaps):
204 # When requesting a bundle2, getbundle returns a stream to make the
204 # When requesting a bundle2, getbundle returns a stream to make the
205 # wire level function happier. We need to build a proper object
205 # wire level function happier. We need to build a proper object
206 # from it in local peer.
206 # from it in local peer.
207 return bundle2.getunbundler(self.ui, cb)
207 return bundle2.getunbundler(self.ui, cb)
208 else:
208 else:
209 return changegroup.getunbundler('01', cb, None)
209 return changegroup.getunbundler('01', cb, None)
210
210
211 def heads(self):
211 def heads(self):
212 return self._repo.heads()
212 return self._repo.heads()
213
213
214 def known(self, nodes):
214 def known(self, nodes):
215 return self._repo.known(nodes)
215 return self._repo.known(nodes)
216
216
217 def listkeys(self, namespace):
217 def listkeys(self, namespace):
218 return self._repo.listkeys(namespace)
218 return self._repo.listkeys(namespace)
219
219
220 def lookup(self, key):
220 def lookup(self, key):
221 return self._repo.lookup(key)
221 return self._repo.lookup(key)
222
222
223 def pushkey(self, namespace, key, old, new):
223 def pushkey(self, namespace, key, old, new):
224 return self._repo.pushkey(namespace, key, old, new)
224 return self._repo.pushkey(namespace, key, old, new)
225
225
226 def stream_out(self):
226 def stream_out(self):
227 raise error.Abort(_('cannot perform stream clone against local '
227 raise error.Abort(_('cannot perform stream clone against local '
228 'peer'))
228 'peer'))
229
229
230 def unbundle(self, cg, heads, url):
230 def unbundle(self, cg, heads, url):
231 """apply a bundle on a repo
231 """apply a bundle on a repo
232
232
233 This function handles the repo locking itself."""
233 This function handles the repo locking itself."""
234 try:
234 try:
235 try:
235 try:
236 cg = exchange.readbundle(self.ui, cg, None)
236 cg = exchange.readbundle(self.ui, cg, None)
237 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
237 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
238 if util.safehasattr(ret, 'getchunks'):
238 if util.safehasattr(ret, 'getchunks'):
239 # This is a bundle20 object, turn it into an unbundler.
239 # This is a bundle20 object, turn it into an unbundler.
240 # This little dance should be dropped eventually when the
240 # This little dance should be dropped eventually when the
241 # API is finally improved.
241 # API is finally improved.
242 stream = util.chunkbuffer(ret.getchunks())
242 stream = util.chunkbuffer(ret.getchunks())
243 ret = bundle2.getunbundler(self.ui, stream)
243 ret = bundle2.getunbundler(self.ui, stream)
244 return ret
244 return ret
245 except Exception as exc:
245 except Exception as exc:
246 # If the exception contains output salvaged from a bundle2
246 # If the exception contains output salvaged from a bundle2
247 # reply, we need to make sure it is printed before continuing
247 # reply, we need to make sure it is printed before continuing
248 # to fail. So we build a bundle2 with such output and consume
248 # to fail. So we build a bundle2 with such output and consume
249 # it directly.
249 # it directly.
250 #
250 #
251 # This is not very elegant but allows a "simple" solution for
251 # This is not very elegant but allows a "simple" solution for
252 # issue4594
252 # issue4594
253 output = getattr(exc, '_bundle2salvagedoutput', ())
253 output = getattr(exc, '_bundle2salvagedoutput', ())
254 if output:
254 if output:
255 bundler = bundle2.bundle20(self._repo.ui)
255 bundler = bundle2.bundle20(self._repo.ui)
256 for out in output:
256 for out in output:
257 bundler.addpart(out)
257 bundler.addpart(out)
258 stream = util.chunkbuffer(bundler.getchunks())
258 stream = util.chunkbuffer(bundler.getchunks())
259 b = bundle2.getunbundler(self.ui, stream)
259 b = bundle2.getunbundler(self.ui, stream)
260 bundle2.processbundle(self._repo, b)
260 bundle2.processbundle(self._repo, b)
261 raise
261 raise
262 except error.PushRaced as exc:
262 except error.PushRaced as exc:
263 raise error.ResponseError(_('push failed:'), str(exc))
263 raise error.ResponseError(_('push failed:'), str(exc))
264
264
265 # End of _basewirecommands interface.
265 # End of _basewirecommands interface.
266
266
267 # Begin of peer interface.
267 # Begin of peer interface.
268
268
269 def iterbatch(self):
269 def iterbatch(self):
270 return peer.localiterbatcher(self)
270 return peer.localiterbatcher(self)
271
271
272 # End of peer interface.
272 # End of peer interface.
273
273
274 class locallegacypeer(repository.legacypeer, localpeer):
274 class locallegacypeer(repository.legacypeer, localpeer):
275 '''peer extension which implements legacy methods too; used for tests with
275 '''peer extension which implements legacy methods too; used for tests with
276 restricted capabilities'''
276 restricted capabilities'''
277
277
278 def __init__(self, repo):
278 def __init__(self, repo):
279 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
279 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
280
280
281 # Begin of baselegacywirecommands interface.
281 # Begin of baselegacywirecommands interface.
282
282
283 def between(self, pairs):
283 def between(self, pairs):
284 return self._repo.between(pairs)
284 return self._repo.between(pairs)
285
285
286 def branches(self, nodes):
286 def branches(self, nodes):
287 return self._repo.branches(nodes)
287 return self._repo.branches(nodes)
288
288
289 def changegroup(self, basenodes, source):
289 def changegroup(self, basenodes, source):
290 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
290 outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
291 missingheads=self._repo.heads())
291 missingheads=self._repo.heads())
292 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
292 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
293
293
294 def changegroupsubset(self, bases, heads, source):
294 def changegroupsubset(self, bases, heads, source):
295 outgoing = discovery.outgoing(self._repo, missingroots=bases,
295 outgoing = discovery.outgoing(self._repo, missingroots=bases,
296 missingheads=heads)
296 missingheads=heads)
297 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
297 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
298
298
299 # End of baselegacywirecommands interface.
299 # End of baselegacywirecommands interface.
300
300
301 # Increment the sub-version when the revlog v2 format changes to lock out old
301 # Increment the sub-version when the revlog v2 format changes to lock out old
302 # clients.
302 # clients.
303 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
303 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
304
304
305 class localrepository(object):
305 class localrepository(object):
306
306
307 supportedformats = {
307 supportedformats = {
308 'revlogv1',
308 'revlogv1',
309 'generaldelta',
309 'generaldelta',
310 'treemanifest',
310 'treemanifest',
311 'manifestv2',
311 'manifestv2',
312 REVLOGV2_REQUIREMENT,
312 REVLOGV2_REQUIREMENT,
313 }
313 }
314 _basesupported = supportedformats | {
314 _basesupported = supportedformats | {
315 'store',
315 'store',
316 'fncache',
316 'fncache',
317 'shared',
317 'shared',
318 'relshared',
318 'relshared',
319 'dotencode',
319 'dotencode',
320 'exp-sparse',
320 'exp-sparse',
321 }
321 }
322 openerreqs = {
322 openerreqs = {
323 'revlogv1',
323 'revlogv1',
324 'generaldelta',
324 'generaldelta',
325 'treemanifest',
325 'treemanifest',
326 'manifestv2',
326 'manifestv2',
327 }
327 }
328
328
329 # a list of (ui, featureset) functions.
329 # a list of (ui, featureset) functions.
330 # only functions defined in module of enabled extensions are invoked
330 # only functions defined in module of enabled extensions are invoked
331 featuresetupfuncs = set()
331 featuresetupfuncs = set()
332
332
333 # list of prefix for file which can be written without 'wlock'
333 # list of prefix for file which can be written without 'wlock'
334 # Extensions should extend this list when needed
334 # Extensions should extend this list when needed
335 _wlockfreeprefix = {
335 _wlockfreeprefix = {
336 # We migh consider requiring 'wlock' for the next
336 # We migh consider requiring 'wlock' for the next
337 # two, but pretty much all the existing code assume
337 # two, but pretty much all the existing code assume
338 # wlock is not needed so we keep them excluded for
338 # wlock is not needed so we keep them excluded for
339 # now.
339 # now.
340 'hgrc',
340 'hgrc',
341 'requires',
341 'requires',
342 # XXX cache is a complicatged business someone
342 # XXX cache is a complicatged business someone
343 # should investigate this in depth at some point
343 # should investigate this in depth at some point
344 'cache/',
344 'cache/',
345 # XXX shouldn't be dirstate covered by the wlock?
345 # XXX shouldn't be dirstate covered by the wlock?
346 'dirstate',
346 'dirstate',
347 # XXX bisect was still a bit too messy at the time
347 # XXX bisect was still a bit too messy at the time
348 # this changeset was introduced. Someone should fix
348 # this changeset was introduced. Someone should fix
349 # the remainig bit and drop this line
349 # the remainig bit and drop this line
350 'bisect.state',
350 'bisect.state',
351 }
351 }
352
352
353 def __init__(self, baseui, path, create=False):
353 def __init__(self, baseui, path, create=False):
354 self.requirements = set()
354 self.requirements = set()
355 self.filtername = None
355 self.filtername = None
356 # wvfs: rooted at the repository root, used to access the working copy
356 # wvfs: rooted at the repository root, used to access the working copy
357 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
357 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
358 # vfs: rooted at .hg, used to access repo files outside of .hg/store
358 # vfs: rooted at .hg, used to access repo files outside of .hg/store
359 self.vfs = None
359 self.vfs = None
360 # svfs: usually rooted at .hg/store, used to access repository history
360 # svfs: usually rooted at .hg/store, used to access repository history
361 # If this is a shared repository, this vfs may point to another
361 # If this is a shared repository, this vfs may point to another
362 # repository's .hg/store directory.
362 # repository's .hg/store directory.
363 self.svfs = None
363 self.svfs = None
364 self.root = self.wvfs.base
364 self.root = self.wvfs.base
365 self.path = self.wvfs.join(".hg")
365 self.path = self.wvfs.join(".hg")
366 self.origroot = path
366 self.origroot = path
367 # This is only used by context.workingctx.match in order to
367 # This is only used by context.workingctx.match in order to
368 # detect files in subrepos.
368 # detect files in subrepos.
369 self.auditor = pathutil.pathauditor(
369 self.auditor = pathutil.pathauditor(
370 self.root, callback=self._checknested)
370 self.root, callback=self._checknested)
371 # This is only used by context.basectx.match in order to detect
371 # This is only used by context.basectx.match in order to detect
372 # files in subrepos.
372 # files in subrepos.
373 self.nofsauditor = pathutil.pathauditor(
373 self.nofsauditor = pathutil.pathauditor(
374 self.root, callback=self._checknested, realfs=False, cached=True)
374 self.root, callback=self._checknested, realfs=False, cached=True)
375 self.baseui = baseui
375 self.baseui = baseui
376 self.ui = baseui.copy()
376 self.ui = baseui.copy()
377 self.ui.copy = baseui.copy # prevent copying repo configuration
377 self.ui.copy = baseui.copy # prevent copying repo configuration
378 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
378 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
379 if (self.ui.configbool('devel', 'all-warnings') or
379 if (self.ui.configbool('devel', 'all-warnings') or
380 self.ui.configbool('devel', 'check-locks')):
380 self.ui.configbool('devel', 'check-locks')):
381 self.vfs.audit = self._getvfsward(self.vfs.audit)
381 self.vfs.audit = self._getvfsward(self.vfs.audit)
382 # A list of callback to shape the phase if no data were found.
382 # A list of callback to shape the phase if no data were found.
383 # Callback are in the form: func(repo, roots) --> processed root.
383 # Callback are in the form: func(repo, roots) --> processed root.
384 # This list it to be filled by extension during repo setup
384 # This list it to be filled by extension during repo setup
385 self._phasedefaults = []
385 self._phasedefaults = []
386 try:
386 try:
387 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
387 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
388 self._loadextensions()
388 self._loadextensions()
389 except IOError:
389 except IOError:
390 pass
390 pass
391
391
392 if self.featuresetupfuncs:
392 if self.featuresetupfuncs:
393 self.supported = set(self._basesupported) # use private copy
393 self.supported = set(self._basesupported) # use private copy
394 extmods = set(m.__name__ for n, m
394 extmods = set(m.__name__ for n, m
395 in extensions.extensions(self.ui))
395 in extensions.extensions(self.ui))
396 for setupfunc in self.featuresetupfuncs:
396 for setupfunc in self.featuresetupfuncs:
397 if setupfunc.__module__ in extmods:
397 if setupfunc.__module__ in extmods:
398 setupfunc(self.ui, self.supported)
398 setupfunc(self.ui, self.supported)
399 else:
399 else:
400 self.supported = self._basesupported
400 self.supported = self._basesupported
401 color.setup(self.ui)
401 color.setup(self.ui)
402
402
403 # Add compression engines.
403 # Add compression engines.
404 for name in util.compengines:
404 for name in util.compengines:
405 engine = util.compengines[name]
405 engine = util.compengines[name]
406 if engine.revlogheader():
406 if engine.revlogheader():
407 self.supported.add('exp-compression-%s' % name)
407 self.supported.add('exp-compression-%s' % name)
408
408
409 if not self.vfs.isdir():
409 if not self.vfs.isdir():
410 if create:
410 if create:
411 self.requirements = newreporequirements(self)
411 self.requirements = newreporequirements(self)
412
412
413 if not self.wvfs.exists():
413 if not self.wvfs.exists():
414 self.wvfs.makedirs()
414 self.wvfs.makedirs()
415 self.vfs.makedir(notindexed=True)
415 self.vfs.makedir(notindexed=True)
416
416
417 if 'store' in self.requirements:
417 if 'store' in self.requirements:
418 self.vfs.mkdir("store")
418 self.vfs.mkdir("store")
419
419
420 # create an invalid changelog
420 # create an invalid changelog
421 self.vfs.append(
421 self.vfs.append(
422 "00changelog.i",
422 "00changelog.i",
423 '\0\0\0\2' # represents revlogv2
423 '\0\0\0\2' # represents revlogv2
424 ' dummy changelog to prevent using the old repo layout'
424 ' dummy changelog to prevent using the old repo layout'
425 )
425 )
426 else:
426 else:
427 raise error.RepoError(_("repository %s not found") % path)
427 raise error.RepoError(_("repository %s not found") % path)
428 elif create:
428 elif create:
429 raise error.RepoError(_("repository %s already exists") % path)
429 raise error.RepoError(_("repository %s already exists") % path)
430 else:
430 else:
431 try:
431 try:
432 self.requirements = scmutil.readrequires(
432 self.requirements = scmutil.readrequires(
433 self.vfs, self.supported)
433 self.vfs, self.supported)
434 except IOError as inst:
434 except IOError as inst:
435 if inst.errno != errno.ENOENT:
435 if inst.errno != errno.ENOENT:
436 raise
436 raise
437
437
438 cachepath = self.vfs.join('cache')
438 cachepath = self.vfs.join('cache')
439 self.sharedpath = self.path
439 self.sharedpath = self.path
440 try:
440 try:
441 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
441 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
442 if 'relshared' in self.requirements:
442 if 'relshared' in self.requirements:
443 sharedpath = self.vfs.join(sharedpath)
443 sharedpath = self.vfs.join(sharedpath)
444 vfs = vfsmod.vfs(sharedpath, realpath=True)
444 vfs = vfsmod.vfs(sharedpath, realpath=True)
445 cachepath = vfs.join('cache')
445 cachepath = vfs.join('cache')
446 s = vfs.base
446 s = vfs.base
447 if not vfs.exists():
447 if not vfs.exists():
448 raise error.RepoError(
448 raise error.RepoError(
449 _('.hg/sharedpath points to nonexistent directory %s') % s)
449 _('.hg/sharedpath points to nonexistent directory %s') % s)
450 self.sharedpath = s
450 self.sharedpath = s
451 except IOError as inst:
451 except IOError as inst:
452 if inst.errno != errno.ENOENT:
452 if inst.errno != errno.ENOENT:
453 raise
453 raise
454
454
455 if 'exp-sparse' in self.requirements and not sparse.enabled:
455 if 'exp-sparse' in self.requirements and not sparse.enabled:
456 raise error.RepoError(_('repository is using sparse feature but '
456 raise error.RepoError(_('repository is using sparse feature but '
457 'sparse is not enabled; enable the '
457 'sparse is not enabled; enable the '
458 '"sparse" extensions to access'))
458 '"sparse" extensions to access'))
459
459
460 self.store = store.store(
460 self.store = store.store(
461 self.requirements, self.sharedpath,
461 self.requirements, self.sharedpath,
462 lambda base: vfsmod.vfs(base, cacheaudited=True))
462 lambda base: vfsmod.vfs(base, cacheaudited=True))
463 self.spath = self.store.path
463 self.spath = self.store.path
464 self.svfs = self.store.vfs
464 self.svfs = self.store.vfs
465 self.sjoin = self.store.join
465 self.sjoin = self.store.join
466 self.vfs.createmode = self.store.createmode
466 self.vfs.createmode = self.store.createmode
467 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
467 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
468 self.cachevfs.createmode = self.store.createmode
468 self.cachevfs.createmode = self.store.createmode
469 if (self.ui.configbool('devel', 'all-warnings') or
469 if (self.ui.configbool('devel', 'all-warnings') or
470 self.ui.configbool('devel', 'check-locks')):
470 self.ui.configbool('devel', 'check-locks')):
471 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
471 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
472 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
472 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
473 else: # standard vfs
473 else: # standard vfs
474 self.svfs.audit = self._getsvfsward(self.svfs.audit)
474 self.svfs.audit = self._getsvfsward(self.svfs.audit)
475 self._applyopenerreqs()
475 self._applyopenerreqs()
476 if create:
476 if create:
477 self._writerequirements()
477 self._writerequirements()
478
478
479 self._dirstatevalidatewarned = False
479 self._dirstatevalidatewarned = False
480
480
481 self._branchcaches = {}
481 self._branchcaches = {}
482 self._revbranchcache = None
482 self._revbranchcache = None
483 self.filterpats = {}
483 self.filterpats = {}
484 self._datafilters = {}
484 self._datafilters = {}
485 self._transref = self._lockref = self._wlockref = None
485 self._transref = self._lockref = self._wlockref = None
486
486
487 # A cache for various files under .hg/ that tracks file changes,
487 # A cache for various files under .hg/ that tracks file changes,
488 # (used by the filecache decorator)
488 # (used by the filecache decorator)
489 #
489 #
490 # Maps a property name to its util.filecacheentry
490 # Maps a property name to its util.filecacheentry
491 self._filecache = {}
491 self._filecache = {}
492
492
493 # hold sets of revision to be filtered
493 # hold sets of revision to be filtered
494 # should be cleared when something might have changed the filter value:
494 # should be cleared when something might have changed the filter value:
495 # - new changesets,
495 # - new changesets,
496 # - phase change,
496 # - phase change,
497 # - new obsolescence marker,
497 # - new obsolescence marker,
498 # - working directory parent change,
498 # - working directory parent change,
499 # - bookmark changes
499 # - bookmark changes
500 self.filteredrevcache = {}
500 self.filteredrevcache = {}
501
501
502 # post-dirstate-status hooks
502 # post-dirstate-status hooks
503 self._postdsstatus = []
503 self._postdsstatus = []
504
504
505 # generic mapping between names and nodes
505 # generic mapping between names and nodes
506 self.names = namespaces.namespaces()
506 self.names = namespaces.namespaces()
507
507
508 # Key to signature value.
508 # Key to signature value.
509 self._sparsesignaturecache = {}
509 self._sparsesignaturecache = {}
510 # Signature to cached matcher instance.
510 # Signature to cached matcher instance.
511 self._sparsematchercache = {}
511 self._sparsematchercache = {}
512
512
513 def _getvfsward(self, origfunc):
513 def _getvfsward(self, origfunc):
514 """build a ward for self.vfs"""
514 """build a ward for self.vfs"""
515 rref = weakref.ref(self)
515 rref = weakref.ref(self)
516 def checkvfs(path, mode=None):
516 def checkvfs(path, mode=None):
517 ret = origfunc(path, mode=mode)
517 ret = origfunc(path, mode=mode)
518 repo = rref()
518 repo = rref()
519 if (repo is None
519 if (repo is None
520 or not util.safehasattr(repo, '_wlockref')
520 or not util.safehasattr(repo, '_wlockref')
521 or not util.safehasattr(repo, '_lockref')):
521 or not util.safehasattr(repo, '_lockref')):
522 return
522 return
523 if mode in (None, 'r', 'rb'):
523 if mode in (None, 'r', 'rb'):
524 return
524 return
525 if path.startswith(repo.path):
525 if path.startswith(repo.path):
526 # truncate name relative to the repository (.hg)
526 # truncate name relative to the repository (.hg)
527 path = path[len(repo.path) + 1:]
527 path = path[len(repo.path) + 1:]
528 if path.startswith('cache/'):
528 if path.startswith('cache/'):
529 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
529 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
530 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
530 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
531 if path.startswith('journal.'):
531 if path.startswith('journal.'):
532 # journal is covered by 'lock'
532 # journal is covered by 'lock'
533 if repo._currentlock(repo._lockref) is None:
533 if repo._currentlock(repo._lockref) is None:
534 repo.ui.develwarn('write with no lock: "%s"' % path,
534 repo.ui.develwarn('write with no lock: "%s"' % path,
535 stacklevel=2, config='check-locks')
535 stacklevel=2, config='check-locks')
536 elif repo._currentlock(repo._wlockref) is None:
536 elif repo._currentlock(repo._wlockref) is None:
537 # rest of vfs files are covered by 'wlock'
537 # rest of vfs files are covered by 'wlock'
538 #
538 #
539 # exclude special files
539 # exclude special files
540 for prefix in self._wlockfreeprefix:
540 for prefix in self._wlockfreeprefix:
541 if path.startswith(prefix):
541 if path.startswith(prefix):
542 return
542 return
543 repo.ui.develwarn('write with no wlock: "%s"' % path,
543 repo.ui.develwarn('write with no wlock: "%s"' % path,
544 stacklevel=2, config='check-locks')
544 stacklevel=2, config='check-locks')
545 return ret
545 return ret
546 return checkvfs
546 return checkvfs
547
547
548 def _getsvfsward(self, origfunc):
548 def _getsvfsward(self, origfunc):
549 """build a ward for self.svfs"""
549 """build a ward for self.svfs"""
550 rref = weakref.ref(self)
550 rref = weakref.ref(self)
551 def checksvfs(path, mode=None):
551 def checksvfs(path, mode=None):
552 ret = origfunc(path, mode=mode)
552 ret = origfunc(path, mode=mode)
553 repo = rref()
553 repo = rref()
554 if repo is None or not util.safehasattr(repo, '_lockref'):
554 if repo is None or not util.safehasattr(repo, '_lockref'):
555 return
555 return
556 if mode in (None, 'r', 'rb'):
556 if mode in (None, 'r', 'rb'):
557 return
557 return
558 if path.startswith(repo.sharedpath):
558 if path.startswith(repo.sharedpath):
559 # truncate name relative to the repository (.hg)
559 # truncate name relative to the repository (.hg)
560 path = path[len(repo.sharedpath) + 1:]
560 path = path[len(repo.sharedpath) + 1:]
561 if repo._currentlock(repo._lockref) is None:
561 if repo._currentlock(repo._lockref) is None:
562 repo.ui.develwarn('write with no lock: "%s"' % path,
562 repo.ui.develwarn('write with no lock: "%s"' % path,
563 stacklevel=3)
563 stacklevel=3)
564 return ret
564 return ret
565 return checksvfs
565 return checksvfs
566
566
567 def close(self):
567 def close(self):
568 self._writecaches()
568 self._writecaches()
569
569
570 def _loadextensions(self):
570 def _loadextensions(self):
571 extensions.loadall(self.ui)
571 extensions.loadall(self.ui)
572
572
573 def _writecaches(self):
573 def _writecaches(self):
574 if self._revbranchcache:
574 if self._revbranchcache:
575 self._revbranchcache.write()
575 self._revbranchcache.write()
576
576
577 def _restrictcapabilities(self, caps):
577 def _restrictcapabilities(self, caps):
578 if self.ui.configbool('experimental', 'bundle2-advertise'):
578 if self.ui.configbool('experimental', 'bundle2-advertise'):
579 caps = set(caps)
579 caps = set(caps)
580 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
580 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
581 caps.add('bundle2=' + urlreq.quote(capsblob))
581 caps.add('bundle2=' + urlreq.quote(capsblob))
582 return caps
582 return caps
583
583
584 def _applyopenerreqs(self):
584 def _applyopenerreqs(self):
585 self.svfs.options = dict((r, 1) for r in self.requirements
585 self.svfs.options = dict((r, 1) for r in self.requirements
586 if r in self.openerreqs)
586 if r in self.openerreqs)
587 # experimental config: format.chunkcachesize
587 # experimental config: format.chunkcachesize
588 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
588 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
589 if chunkcachesize is not None:
589 if chunkcachesize is not None:
590 self.svfs.options['chunkcachesize'] = chunkcachesize
590 self.svfs.options['chunkcachesize'] = chunkcachesize
591 # experimental config: format.maxchainlen
591 # experimental config: format.maxchainlen
592 maxchainlen = self.ui.configint('format', 'maxchainlen')
592 maxchainlen = self.ui.configint('format', 'maxchainlen')
593 if maxchainlen is not None:
593 if maxchainlen is not None:
594 self.svfs.options['maxchainlen'] = maxchainlen
594 self.svfs.options['maxchainlen'] = maxchainlen
595 # experimental config: format.manifestcachesize
595 # experimental config: format.manifestcachesize
596 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
596 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
597 if manifestcachesize is not None:
597 if manifestcachesize is not None:
598 self.svfs.options['manifestcachesize'] = manifestcachesize
598 self.svfs.options['manifestcachesize'] = manifestcachesize
599 # experimental config: format.aggressivemergedeltas
599 # experimental config: format.aggressivemergedeltas
600 aggressivemergedeltas = self.ui.configbool('format',
600 aggressivemergedeltas = self.ui.configbool('format',
601 'aggressivemergedeltas')
601 'aggressivemergedeltas')
602 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
602 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
603 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
603 self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
604 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
604 chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
605 if 0 <= chainspan:
605 if 0 <= chainspan:
606 self.svfs.options['maxdeltachainspan'] = chainspan
606 self.svfs.options['maxdeltachainspan'] = chainspan
607 mmapindexthreshold = self.ui.configbytes('experimental',
607 mmapindexthreshold = self.ui.configbytes('experimental',
608 'mmapindexthreshold')
608 'mmapindexthreshold')
609 if mmapindexthreshold is not None:
609 if mmapindexthreshold is not None:
610 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
610 self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
611 withsparseread = self.ui.configbool('experimental', 'sparse-read')
611 withsparseread = self.ui.configbool('experimental', 'sparse-read')
612 srdensitythres = float(self.ui.config('experimental',
612 srdensitythres = float(self.ui.config('experimental',
613 'sparse-read.density-threshold'))
613 'sparse-read.density-threshold'))
614 srmingapsize = self.ui.configbytes('experimental',
614 srmingapsize = self.ui.configbytes('experimental',
615 'sparse-read.min-gap-size')
615 'sparse-read.min-gap-size')
616 self.svfs.options['with-sparse-read'] = withsparseread
616 self.svfs.options['with-sparse-read'] = withsparseread
617 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
617 self.svfs.options['sparse-read-density-threshold'] = srdensitythres
618 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
618 self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
619
619
620 for r in self.requirements:
620 for r in self.requirements:
621 if r.startswith('exp-compression-'):
621 if r.startswith('exp-compression-'):
622 self.svfs.options['compengine'] = r[len('exp-compression-'):]
622 self.svfs.options['compengine'] = r[len('exp-compression-'):]
623
623
624 # TODO move "revlogv2" to openerreqs once finalized.
624 # TODO move "revlogv2" to openerreqs once finalized.
625 if REVLOGV2_REQUIREMENT in self.requirements:
625 if REVLOGV2_REQUIREMENT in self.requirements:
626 self.svfs.options['revlogv2'] = True
626 self.svfs.options['revlogv2'] = True
627
627
628 def _writerequirements(self):
628 def _writerequirements(self):
629 scmutil.writerequires(self.vfs, self.requirements)
629 scmutil.writerequires(self.vfs, self.requirements)
630
630
631 def _checknested(self, path):
631 def _checknested(self, path):
632 """Determine if path is a legal nested repository."""
632 """Determine if path is a legal nested repository."""
633 if not path.startswith(self.root):
633 if not path.startswith(self.root):
634 return False
634 return False
635 subpath = path[len(self.root) + 1:]
635 subpath = path[len(self.root) + 1:]
636 normsubpath = util.pconvert(subpath)
636 normsubpath = util.pconvert(subpath)
637
637
638 # XXX: Checking against the current working copy is wrong in
638 # XXX: Checking against the current working copy is wrong in
639 # the sense that it can reject things like
639 # the sense that it can reject things like
640 #
640 #
641 # $ hg cat -r 10 sub/x.txt
641 # $ hg cat -r 10 sub/x.txt
642 #
642 #
643 # if sub/ is no longer a subrepository in the working copy
643 # if sub/ is no longer a subrepository in the working copy
644 # parent revision.
644 # parent revision.
645 #
645 #
646 # However, it can of course also allow things that would have
646 # However, it can of course also allow things that would have
647 # been rejected before, such as the above cat command if sub/
647 # been rejected before, such as the above cat command if sub/
648 # is a subrepository now, but was a normal directory before.
648 # is a subrepository now, but was a normal directory before.
649 # The old path auditor would have rejected by mistake since it
649 # The old path auditor would have rejected by mistake since it
650 # panics when it sees sub/.hg/.
650 # panics when it sees sub/.hg/.
651 #
651 #
652 # All in all, checking against the working copy seems sensible
652 # All in all, checking against the working copy seems sensible
653 # since we want to prevent access to nested repositories on
653 # since we want to prevent access to nested repositories on
654 # the filesystem *now*.
654 # the filesystem *now*.
655 ctx = self[None]
655 ctx = self[None]
656 parts = util.splitpath(subpath)
656 parts = util.splitpath(subpath)
657 while parts:
657 while parts:
658 prefix = '/'.join(parts)
658 prefix = '/'.join(parts)
659 if prefix in ctx.substate:
659 if prefix in ctx.substate:
660 if prefix == normsubpath:
660 if prefix == normsubpath:
661 return True
661 return True
662 else:
662 else:
663 sub = ctx.sub(prefix)
663 sub = ctx.sub(prefix)
664 return sub.checknested(subpath[len(prefix) + 1:])
664 return sub.checknested(subpath[len(prefix) + 1:])
665 else:
665 else:
666 parts.pop()
666 parts.pop()
667 return False
667 return False
668
668
669 def peer(self):
669 def peer(self):
670 return localpeer(self) # not cached to avoid reference cycle
670 return localpeer(self) # not cached to avoid reference cycle
671
671
672 def unfiltered(self):
672 def unfiltered(self):
673 """Return unfiltered version of the repository
673 """Return unfiltered version of the repository
674
674
675 Intended to be overwritten by filtered repo."""
675 Intended to be overwritten by filtered repo."""
676 return self
676 return self
677
677
678 def filtered(self, name, visibilityexceptions=None):
678 def filtered(self, name, visibilityexceptions=None):
679 """Return a filtered version of a repository"""
679 """Return a filtered version of a repository"""
680 cls = repoview.newtype(self.unfiltered().__class__)
680 cls = repoview.newtype(self.unfiltered().__class__)
681 return cls(self, name, visibilityexceptions)
681 return cls(self, name, visibilityexceptions)
682
682
683 @repofilecache('bookmarks', 'bookmarks.current')
683 @repofilecache('bookmarks', 'bookmarks.current')
684 def _bookmarks(self):
684 def _bookmarks(self):
685 return bookmarks.bmstore(self)
685 return bookmarks.bmstore(self)
686
686
687 @property
687 @property
688 def _activebookmark(self):
688 def _activebookmark(self):
689 return self._bookmarks.active
689 return self._bookmarks.active
690
690
691 # _phasesets depend on changelog. what we need is to call
691 # _phasesets depend on changelog. what we need is to call
692 # _phasecache.invalidate() if '00changelog.i' was changed, but it
692 # _phasecache.invalidate() if '00changelog.i' was changed, but it
693 # can't be easily expressed in filecache mechanism.
693 # can't be easily expressed in filecache mechanism.
694 @storecache('phaseroots', '00changelog.i')
694 @storecache('phaseroots', '00changelog.i')
695 def _phasecache(self):
695 def _phasecache(self):
696 return phases.phasecache(self, self._phasedefaults)
696 return phases.phasecache(self, self._phasedefaults)
697
697
698 @storecache('obsstore')
698 @storecache('obsstore')
699 def obsstore(self):
699 def obsstore(self):
700 return obsolete.makestore(self.ui, self)
700 return obsolete.makestore(self.ui, self)
701
701
702 @storecache('00changelog.i')
702 @storecache('00changelog.i')
703 def changelog(self):
703 def changelog(self):
704 return changelog.changelog(self.svfs,
704 return changelog.changelog(self.svfs,
705 trypending=txnutil.mayhavepending(self.root))
705 trypending=txnutil.mayhavepending(self.root))
706
706
707 def _constructmanifest(self):
707 def _constructmanifest(self):
708 # This is a temporary function while we migrate from manifest to
708 # This is a temporary function while we migrate from manifest to
709 # manifestlog. It allows bundlerepo and unionrepo to intercept the
709 # manifestlog. It allows bundlerepo and unionrepo to intercept the
710 # manifest creation.
710 # manifest creation.
711 return manifest.manifestrevlog(self.svfs)
711 return manifest.manifestrevlog(self.svfs)
712
712
713 @storecache('00manifest.i')
713 @storecache('00manifest.i')
714 def manifestlog(self):
714 def manifestlog(self):
715 return manifest.manifestlog(self.svfs, self)
715 return manifest.manifestlog(self.svfs, self)
716
716
717 @repofilecache('dirstate')
717 @repofilecache('dirstate')
718 def dirstate(self):
718 def dirstate(self):
719 sparsematchfn = lambda: sparse.matcher(self)
719 sparsematchfn = lambda: sparse.matcher(self)
720
720
721 return dirstate.dirstate(self.vfs, self.ui, self.root,
721 return dirstate.dirstate(self.vfs, self.ui, self.root,
722 self._dirstatevalidate, sparsematchfn)
722 self._dirstatevalidate, sparsematchfn)
723
723
724 def _dirstatevalidate(self, node):
724 def _dirstatevalidate(self, node):
725 try:
725 try:
726 self.changelog.rev(node)
726 self.changelog.rev(node)
727 return node
727 return node
728 except error.LookupError:
728 except error.LookupError:
729 if not self._dirstatevalidatewarned:
729 if not self._dirstatevalidatewarned:
730 self._dirstatevalidatewarned = True
730 self._dirstatevalidatewarned = True
731 self.ui.warn(_("warning: ignoring unknown"
731 self.ui.warn(_("warning: ignoring unknown"
732 " working parent %s!\n") % short(node))
732 " working parent %s!\n") % short(node))
733 return nullid
733 return nullid
734
734
735 def __getitem__(self, changeid):
735 def __getitem__(self, changeid):
736 if changeid is None:
736 if changeid is None:
737 return context.workingctx(self)
737 return context.workingctx(self)
738 if isinstance(changeid, slice):
738 if isinstance(changeid, slice):
739 # wdirrev isn't contiguous so the slice shouldn't include it
739 # wdirrev isn't contiguous so the slice shouldn't include it
740 return [context.changectx(self, i)
740 return [context.changectx(self, i)
741 for i in xrange(*changeid.indices(len(self)))
741 for i in xrange(*changeid.indices(len(self)))
742 if i not in self.changelog.filteredrevs]
742 if i not in self.changelog.filteredrevs]
743 try:
743 try:
744 return context.changectx(self, changeid)
744 return context.changectx(self, changeid)
745 except error.WdirUnsupported:
745 except error.WdirUnsupported:
746 return context.workingctx(self)
746 return context.workingctx(self)
747
747
748 def __contains__(self, changeid):
748 def __contains__(self, changeid):
749 """True if the given changeid exists
749 """True if the given changeid exists
750
750
751 error.LookupError is raised if an ambiguous node specified.
751 error.LookupError is raised if an ambiguous node specified.
752 """
752 """
753 try:
753 try:
754 self[changeid]
754 self[changeid]
755 return True
755 return True
756 except error.RepoLookupError:
756 except error.RepoLookupError:
757 return False
757 return False
758
758
759 def __nonzero__(self):
759 def __nonzero__(self):
760 return True
760 return True
761
761
762 __bool__ = __nonzero__
762 __bool__ = __nonzero__
763
763
764 def __len__(self):
764 def __len__(self):
765 return len(self.changelog)
765 # no need to pay the cost of repoview.changelog
766 unfi = self.unfiltered()
767 return len(unfi.changelog)
766
768
767 def __iter__(self):
769 def __iter__(self):
768 return iter(self.changelog)
770 return iter(self.changelog)
769
771
770 def revs(self, expr, *args):
772 def revs(self, expr, *args):
771 '''Find revisions matching a revset.
773 '''Find revisions matching a revset.
772
774
773 The revset is specified as a string ``expr`` that may contain
775 The revset is specified as a string ``expr`` that may contain
774 %-formatting to escape certain types. See ``revsetlang.formatspec``.
776 %-formatting to escape certain types. See ``revsetlang.formatspec``.
775
777
776 Revset aliases from the configuration are not expanded. To expand
778 Revset aliases from the configuration are not expanded. To expand
777 user aliases, consider calling ``scmutil.revrange()`` or
779 user aliases, consider calling ``scmutil.revrange()`` or
778 ``repo.anyrevs([expr], user=True)``.
780 ``repo.anyrevs([expr], user=True)``.
779
781
780 Returns a revset.abstractsmartset, which is a list-like interface
782 Returns a revset.abstractsmartset, which is a list-like interface
781 that contains integer revisions.
783 that contains integer revisions.
782 '''
784 '''
783 expr = revsetlang.formatspec(expr, *args)
785 expr = revsetlang.formatspec(expr, *args)
784 m = revset.match(None, expr)
786 m = revset.match(None, expr)
785 return m(self)
787 return m(self)
786
788
787 def set(self, expr, *args):
789 def set(self, expr, *args):
788 '''Find revisions matching a revset and emit changectx instances.
790 '''Find revisions matching a revset and emit changectx instances.
789
791
790 This is a convenience wrapper around ``revs()`` that iterates the
792 This is a convenience wrapper around ``revs()`` that iterates the
791 result and is a generator of changectx instances.
793 result and is a generator of changectx instances.
792
794
793 Revset aliases from the configuration are not expanded. To expand
795 Revset aliases from the configuration are not expanded. To expand
794 user aliases, consider calling ``scmutil.revrange()``.
796 user aliases, consider calling ``scmutil.revrange()``.
795 '''
797 '''
796 for r in self.revs(expr, *args):
798 for r in self.revs(expr, *args):
797 yield self[r]
799 yield self[r]
798
800
799 def anyrevs(self, specs, user=False, localalias=None):
801 def anyrevs(self, specs, user=False, localalias=None):
800 '''Find revisions matching one of the given revsets.
802 '''Find revisions matching one of the given revsets.
801
803
802 Revset aliases from the configuration are not expanded by default. To
804 Revset aliases from the configuration are not expanded by default. To
803 expand user aliases, specify ``user=True``. To provide some local
805 expand user aliases, specify ``user=True``. To provide some local
804 definitions overriding user aliases, set ``localalias`` to
806 definitions overriding user aliases, set ``localalias`` to
805 ``{name: definitionstring}``.
807 ``{name: definitionstring}``.
806 '''
808 '''
807 if user:
809 if user:
808 m = revset.matchany(self.ui, specs, repo=self,
810 m = revset.matchany(self.ui, specs, repo=self,
809 localalias=localalias)
811 localalias=localalias)
810 else:
812 else:
811 m = revset.matchany(None, specs, localalias=localalias)
813 m = revset.matchany(None, specs, localalias=localalias)
812 return m(self)
814 return m(self)
813
815
814 def url(self):
816 def url(self):
815 return 'file:' + self.root
817 return 'file:' + self.root
816
818
817 def hook(self, name, throw=False, **args):
819 def hook(self, name, throw=False, **args):
818 """Call a hook, passing this repo instance.
820 """Call a hook, passing this repo instance.
819
821
820 This a convenience method to aid invoking hooks. Extensions likely
822 This a convenience method to aid invoking hooks. Extensions likely
821 won't call this unless they have registered a custom hook or are
823 won't call this unless they have registered a custom hook or are
822 replacing code that is expected to call a hook.
824 replacing code that is expected to call a hook.
823 """
825 """
824 return hook.hook(self.ui, self, name, throw, **args)
826 return hook.hook(self.ui, self, name, throw, **args)
825
827
826 @filteredpropertycache
828 @filteredpropertycache
827 def _tagscache(self):
829 def _tagscache(self):
828 '''Returns a tagscache object that contains various tags related
830 '''Returns a tagscache object that contains various tags related
829 caches.'''
831 caches.'''
830
832
831 # This simplifies its cache management by having one decorated
833 # This simplifies its cache management by having one decorated
832 # function (this one) and the rest simply fetch things from it.
834 # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
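
    # Illustrative sketch (editorial addition, not part of localrepo.py):
    # how a caller might combine tags() and tagtype(), using short() from
    # mercurial.node. Assumes `repo` is an open localrepository instance.
    #
    #   for name, node in repo.tags().iteritems():
    #       kind = repo.tagtype(name) or 'unknown'
    #       repo.ui.write('%s %s %s\n' % (short(node), kind, name))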

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)
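
    # Illustrative sketch (editorial addition, not part of localrepo.py):
    # the two methods above answer the reverse question, node -> names.
    # Assuming `repo` and a changeset context `ctx`:
    #
    #   node = ctx.node()
    #   names = repo.nodetags(node) + repo.nodebookmarks(node)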

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing
        branch (e.g. namespace).
        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass
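
    # Illustrative sketch (editorial addition, not part of localrepo.py):
    # with ignoremissing=True a missing branch yields None instead of a
    # RepoLookupError.
    #
    #   tip = repo.branchtip('default')                      # node, or raises
    #   tip = repo.branchtip('no-such-branch', ignoremissing=True)  # -> None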

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result
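
    # Illustrative sketch (editorial addition, not part of localrepo.py):
    # known() answers membership per node, treating filtered (e.g. hidden)
    # revisions as unknown.
    #
    #   flags = repo.known([node1, node2])   # e.g. [True, False]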

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or
        # nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing, we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records: the dirstate cannot do it itself, as
                # it requires access to the parents' manifests. Preserve
                # them only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')
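
    # Illustrative sketch (editorial addition, not part of localrepo.py):
    # the 'encode' and 'decode' sections consumed by _loadfilter() are
    # configured in hgrc; the gzip example below follows the one in the
    # hgrc documentation. A pattern mapped to '!' disables filtering for
    # matching files.
    #
    #   [encode]
    #   # uncompress gzip files on checkin to improve delta compression
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   # recompress gzip files when writing them to the working directory
    #   *.gz = pipe: gzip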

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)
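
    # Illustrative sketch (editorial addition, not part of localrepo.py):
    # the `flags` argument of wwrite() uses the manifest flag characters,
    # 'l' for a symlink and 'x' for an executable file.
    #
    #   repo.wwrite('hello.sh', data, 'x')   # decoded, then made executable
    #   repo.wwrite('link', 'target', 'l')   # written as a symlink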

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction
        # touched tags (new, changed or deleted tags). In addition, the
        # details of these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as
        # it might exist from a previous transaction even if no tags were
        # touched in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here. As we do it only once,
                # building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist
                        # when the transaction is closed (for txnclose
                        # hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a
            # hacky path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to
            # track other families of changes (bookmarks, phases,
            # obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # This should be explicitly invoked here, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running.
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles)
        tr.changes['revs'] = xrange(0, 0)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if the transaction is successful; will schedule a
            hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if the transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
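
    # Illustrative sketch (editorial addition, not part of localrepo.py):
    # the usual calling pattern for transaction(), respecting the locking
    # requirement checked at the top of the method:
    #
    #   with repo.lock():
    #       tr = repo.transaction('my-operation')
    #       try:
    #           # ... write store data, registering files with tr ...
    #           tr.close()
    #       finally:
    #           tr.release()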

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback,
        # pass them to destroy(), which will prevent the branchhead cache
        # from being invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to
        augment this logic. For this purpose, the created transaction is
        passed to the method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater
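
    # Illustrative sketch (editorial addition, not part of localrepo.py):
    # an extension could augment the cache-updating callback, for example
    # via extensions.wrapfunction:
    #
    #   def _buildcacheupdater(orig, repo, newtransaction):
    #       updater = orig(repo, newtransaction)
    #       def wrapped(tr):
    #           updater(tr)
    #           # ... warm extension-specific caches here ...
    #       return wrapped
    #   extensions.wrapfunction(localrepo.localrepository,
    #                           '_buildcacheupdater', _buildcacheupdater)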

    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the
        transaction will be available in the 'tr' argument. This can be
        used to selectively update caches relevant to the changes in that
        transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

    def invalidatecaches(self):
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We
                # don't want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()
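
    # Illustrative sketch (editorial addition, not part of localrepo.py):
    # _afterlock defers work until every lock is released; if no lock is
    # held, the callback runs immediately.
    #
    #   def notify():
    #       repo.ui.note('all repository locks released\n')
    #   repo._afterlock(notify)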

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
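
    # Illustrative sketch (editorial addition, not part of localrepo.py):
    # acquiring both locks in the documented order and releasing them in
    # reverse, the same pattern rollback() uses above.
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       # ... modify working copy and store ...
    #   finally:
    #       release(lock, wlock)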

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to
            # find where the file copy came from if the source of a copy was
            # not in the parent directory. However, this doesn't actually
            # make sense to do (what does a copy from something not in your
            # working copy even
1733 # do (what does a copy from something not in your working copy even
1732 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1734 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1733 # the user that copy information was dropped, so if they didn't
1735 # the user that copy information was dropped, so if they didn't
1734 # expect this outcome it can be fixed, but this is the correct
1736 # expect this outcome it can be fixed, but this is the correct
1735 # behavior in this circumstance.
1737 # behavior in this circumstance.
1736
1738
1737 if crev:
1739 if crev:
1738 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1740 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1739 meta["copy"] = cfname
1741 meta["copy"] = cfname
1740 meta["copyrev"] = hex(crev)
1742 meta["copyrev"] = hex(crev)
1741 fparent1, fparent2 = nullid, newfparent
1743 fparent1, fparent2 = nullid, newfparent
1742 else:
1744 else:
1743 self.ui.warn(_("warning: can't find ancestor for '%s' "
1745 self.ui.warn(_("warning: can't find ancestor for '%s' "
1744 "copied from '%s'!\n") % (fname, cfname))
1746 "copied from '%s'!\n") % (fname, cfname))
1745
1747
1746 elif fparent1 == nullid:
1748 elif fparent1 == nullid:
1747 fparent1, fparent2 = fparent2, nullid
1749 fparent1, fparent2 = fparent2, nullid
1748 elif fparent2 != nullid:
1750 elif fparent2 != nullid:
1749 # is one parent an ancestor of the other?
1751 # is one parent an ancestor of the other?
1750 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1752 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1751 if fparent1 in fparentancestors:
1753 if fparent1 in fparentancestors:
1752 fparent1, fparent2 = fparent2, nullid
1754 fparent1, fparent2 = fparent2, nullid
1753 elif fparent2 in fparentancestors:
1755 elif fparent2 in fparentancestors:
1754 fparent2 = nullid
1756 fparent2 = nullid
1755
1757
1756 # is the file changed?
1758 # is the file changed?
1757 text = fctx.data()
1759 text = fctx.data()
1758 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1760 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1759 changelist.append(fname)
1761 changelist.append(fname)
1760 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1762 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1761 # are just the flags changed during merge?
1763 # are just the flags changed during merge?
1762 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1764 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1763 changelist.append(fname)
1765 changelist.append(fname)
1764
1766
1765 return fparent1
1767 return fparent1
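    # Sketch (illustrative only): for a rename recorded above, the filelog
    # metadata passed to flog.add() would look roughly like
    #
    #     meta = {'copy': 'foo', 'copyrev': hex(crev)}
    #
    # with fparent1 forced to nullid so readers know to consult the copy data.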

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepo.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped before the
            # hook fires
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
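    # Usage sketch (illustrative only; message and user are assumed):
    #
    #     node = repo.commit(text='fix the frobnicator',
    #                        user='alice <alice@example.com>')
    #     if node is None:
    #         pass  # nothing changed and ui.allowemptycommit was not set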

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
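    # Config sketch (illustrative only): the 'pretxncommit' hook fired above
    # runs while the transaction is still open, so a failing hook rolls the
    # commit back; one documented way to wire it up in hgrc:
    #
    #     [hooks]
    #     pretxncommit.crlf = python:hgext.win32text.forbidcrlf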

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)
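    # Usage sketch (illustrative only): with the defaults this compares the
    # working directory against its first parent, like plain 'hg status':
    #
    #     st = repo.status()
    #     for f in st.modified:
    #         repo.ui.write('M %s\n' % f)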

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meantime. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
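    # Usage sketch (illustrative only): since branchheads() returns heads
    # newest first, the tip-most head of a branch is simply:
    #
    #     bheads = repo.branchheads('default')
    #     newest = bheads[0] if bheads else None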

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
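    # Note: as the doubling of 'f' above shows, between() records the nodes at
    # distances 1, 2, 4, 8, ... along the first-parent chain from each 'top'
    # toward 'bottom' -- an exponentially thinning sample of the range, as
    # used by the legacy wire protocol.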

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of functions that are called with a
        pushop (carrying repo, remote, and outgoing attributes) before
        changesets are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
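    # Usage sketch (illustrative only): pushkey()/listkeys() back the generic
    # key-value namespaces of the wire protocol; with stock Mercurial,
    #
    #     repo.listkeys('bookmarks')
    #
    # would return a {bookmark name: hex node} mapping, subject to the hooks
    # fired above.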

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))
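# Sketch (illustrative only): undoname() maps a transaction journal file to
# its post-transaction backup name, e.g.
#
#     undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'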

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2'):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
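# Sketch (illustrative only; 'exp-myfeature' is a made-up requirement): per
# the docstring above, an extension could wrap this function roughly like
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, repo):
#         requirements = orig(repo)
#         requirements.add('exp-myfeature')
#         return requirements
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'newreporequirements',
#                                 _newreporequirements)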