localrepo: pass transaction kwargs as strings, not bytes...

Augie Fackler - r35858:b43b314c default
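The porting issue this commit addresses: Mercurial's source uses bytes
literals throughout, but on Python 3 a dict splatted into **kwargs must
have native str keys, or the call raises TypeError. A minimal sketch of
the failure mode (hypothetical function name, not from this changeset):

    def maketransaction(**kwargs):
        return kwargs

    maketransaction(**{'onclose': None})    # works on Python 2 and 3
    maketransaction(**{b'onclose': None})   # Python 2: fine (bytes is str)
                                            # Python 3: TypeError:
                                            #   keywords must be strings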
@@ -1,2284 +1,2284 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self._ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    @util.propertycache
    def ui(self):
        return self._ui

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def iterbatch(self):
        return peer.localiterbatcher(self)

    # End of peer interface.

class locallegacypeer(repository.legacypeer, localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, basenodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

class localrepository(object):

    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
        REVLOGV2_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
    }

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        self.auditor = pathutil.pathauditor(
            self.root, callback=self._checknested)
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        self.nofsauditor = pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extension to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

750 """True if the given changeid exists
750 """True if the given changeid exists
751
751
752 error.LookupError is raised if an ambiguous node specified.
752 error.LookupError is raised if an ambiguous node specified.
753 """
753 """
754 try:
754 try:
755 self[changeid]
755 self[changeid]
756 return True
756 return True
757 except error.RepoLookupError:
757 except error.RepoLookupError:
758 return False
758 return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self,
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

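    # A minimal usage sketch of the three query helpers above (hypothetical
    # values, not part of the original file); the %-escaping comes from
    # revsetlang.formatspec:
    #
    #   repo.revs('%ld and merge()', [5, 6, 7])     # %ld escapes an int list
    #   for ctx in repo.set('children(%n)', node):  # %n escapes a binary node
    #       ui.write(ctx.hex() + '\n')
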
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
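
    # A minimal usage sketch (hypothetical values, not part of the original
    # file); keyword arguments are exposed to shell hooks as HG_* environment
    # variables and passed as keywords to in-process Python hooks:
    #
    #   repo.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1)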

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
938 for bookmark, n in self._bookmarks.iteritems():
939 if n == node:
939 if n == node:
940 marks.append(bookmark)
940 marks.append(bookmark)
941 return sorted(marks)
941 return sorted(marks)
942
942
943 def branchmap(self):
943 def branchmap(self):
944 '''returns a dictionary {branch: [branchheads]} with branchheads
944 '''returns a dictionary {branch: [branchheads]} with branchheads
945 ordered by increasing revision number'''
945 ordered by increasing revision number'''
946 branchmap.updatecache(self)
946 branchmap.updatecache(self)
947 return self._branchcaches[self.filtername]
947 return self._branchcaches[self.filtername]
948
948
949 @unfilteredmethod
949 @unfilteredmethod
950 def revbranchcache(self):
950 def revbranchcache(self):
951 if not self._revbranchcache:
951 if not self._revbranchcache:
952 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
952 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
953 return self._revbranchcache
953 return self._revbranchcache
954
954
955 def branchtip(self, branch, ignoremissing=False):
955 def branchtip(self, branch, ignoremissing=False):
956 '''return the tip node for a given branch
956 '''return the tip node for a given branch
957
957
958 If ignoremissing is True, then this method will not raise an error.
958 If ignoremissing is True, then this method will not raise an error.
959 This is helpful for callers that only expect None for a missing branch
959 This is helpful for callers that only expect None for a missing branch
960 (e.g. namespace).
960 (e.g. namespace).
961
961
962 '''
962 '''
963 try:
963 try:
964 return self.branchmap().branchtip(branch)
964 return self.branchmap().branchtip(branch)
965 except KeyError:
965 except KeyError:
966 if not ignoremissing:
966 if not ignoremissing:
967 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
967 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
968 else:
968 else:
969 pass
969 pass
970
970
971 def lookup(self, key):
971 def lookup(self, key):
972 return self[key].node()
972 return self[key].node()
973
973
974 def lookupbranch(self, key, remote=None):
974 def lookupbranch(self, key, remote=None):
975 repo = remote or self
975 repo = remote or self
976 if key in repo.branchmap():
976 if key in repo.branchmap():
977 return key
977 return key
978
978
979 repo = (remote and remote.local()) and remote or self
979 repo = (remote and remote.local()) and remote or self
980 return repo[key].branch()
980 return repo[key].branch()
981
981
982 def known(self, nodes):
982 def known(self, nodes):
983 cl = self.changelog
983 cl = self.changelog
984 nm = cl.nodemap
984 nm = cl.nodemap
985 filtered = cl.filteredrevs
985 filtered = cl.filteredrevs
986 result = []
986 result = []
987 for n in nodes:
987 for n in nodes:
988 r = nm.get(n)
988 r = nm.get(n)
989 resp = not (r is None or r in filtered)
989 resp = not (r is None or r in filtered)
990 result.append(resp)
990 result.append(resp)
991 return result
991 return result
992
992
993 def local(self):
993 def local(self):
994 return self
994 return self
995
995
996 def publishing(self):
996 def publishing(self):
997 # it's safe (and desirable) to trust the publish flag unconditionally
997 # it's safe (and desirable) to trust the publish flag unconditionally
998 # so that we don't finalize changes shared between users via ssh or nfs
998 # so that we don't finalize changes shared between users via ssh or nfs
999 return self.ui.configbool('phases', 'publish', untrusted=True)
999 return self.ui.configbool('phases', 'publish', untrusted=True)
1000
1000
1001 def cancopy(self):
1001 def cancopy(self):
1002 # so statichttprepo's override of local() works
1002 # so statichttprepo's override of local() works
1003 if not self.local():
1003 if not self.local():
1004 return False
1004 return False
1005 if not self.publishing():
1005 if not self.publishing():
1006 return True
1006 return True
1007 # if publishing we can't copy if there is filtered content
1007 # if publishing we can't copy if there is filtered content
1008 return not self.filtered('visible').changelog.filteredrevs
1008 return not self.filtered('visible').changelog.filteredrevs
1009
1009
1010 def shared(self):
1010 def shared(self):
1011 '''the type of shared repository (None if not shared)'''
1011 '''the type of shared repository (None if not shared)'''
1012 if self.sharedpath != self.path:
1012 if self.sharedpath != self.path:
1013 return 'store'
1013 return 'store'
1014 return None
1014 return None
1015
1015
1016 def wjoin(self, f, *insidef):
1016 def wjoin(self, f, *insidef):
1017 return self.vfs.reljoin(self.root, f, *insidef)
1017 return self.vfs.reljoin(self.root, f, *insidef)
1018
1018
1019 def file(self, f):
1019 def file(self, f):
1020 if f[0] == '/':
1020 if f[0] == '/':
1021 f = f[1:]
1021 f = f[1:]
1022 return filelog.filelog(self.svfs, f)
1022 return filelog.filelog(self.svfs, f)
1023
1023
1024 def changectx(self, changeid):
1024 def changectx(self, changeid):
1025 return self[changeid]
1025 return self[changeid]
1026
1026
1027 def setparents(self, p1, p2=nullid):
1027 def setparents(self, p1, p2=nullid):
1028 with self.dirstate.parentchange():
1028 with self.dirstate.parentchange():
1029 copies = self.dirstate.setparents(p1, p2)
1029 copies = self.dirstate.setparents(p1, p2)
1030 pctx = self[p1]
1030 pctx = self[p1]
1031 if copies:
1031 if copies:
1032 # Adjust copy records, the dirstate cannot do it, it
1032 # Adjust copy records, the dirstate cannot do it, it
1033 # requires access to parents manifests. Preserve them
1033 # requires access to parents manifests. Preserve them
1034 # only for entries added to first parent.
1034 # only for entries added to first parent.
1035 for f in copies:
1035 for f in copies:
1036 if f not in pctx and copies[f] in pctx:
1036 if f not in pctx and copies[f] in pctx:
1037 self.dirstate.copy(copies[f], f)
1037 self.dirstate.copy(copies[f], f)
1038 if p2 == nullid:
1038 if p2 == nullid:
1039 for f, s in sorted(self.dirstate.copies().items()):
1039 for f, s in sorted(self.dirstate.copies().items()):
1040 if f not in pctx and s not in pctx:
1040 if f not in pctx and s not in pctx:
1041 self.dirstate.copy(None, f)
1041 self.dirstate.copy(None, f)
1042
1042
1043 def filectx(self, path, changeid=None, fileid=None):
1043 def filectx(self, path, changeid=None, fileid=None):
1044 """changeid can be a changeset revision, node, or tag.
1044 """changeid can be a changeset revision, node, or tag.
1045 fileid can be a file revision or node."""
1045 fileid can be a file revision or node."""
1046 return context.filectx(self, path, changeid, fileid)
1046 return context.filectx(self, path, changeid, fileid)
1047
1047
1048 def getcwd(self):
1048 def getcwd(self):
1049 return self.dirstate.getcwd()
1049 return self.dirstate.getcwd()
1050
1050
1051 def pathto(self, f, cwd=None):
1051 def pathto(self, f, cwd=None):
1052 return self.dirstate.pathto(f, cwd)
1052 return self.dirstate.pathto(f, cwd)
1053
1053
1054 def _loadfilter(self, filter):
1054 def _loadfilter(self, filter):
1055 if filter not in self.filterpats:
1055 if filter not in self.filterpats:
1056 l = []
1056 l = []
1057 for pat, cmd in self.ui.configitems(filter):
1057 for pat, cmd in self.ui.configitems(filter):
1058 if cmd == '!':
1058 if cmd == '!':
1059 continue
1059 continue
1060 mf = matchmod.match(self.root, '', [pat])
1060 mf = matchmod.match(self.root, '', [pat])
1061 fn = None
1061 fn = None
1062 params = cmd
1062 params = cmd
1063 for name, filterfn in self._datafilters.iteritems():
1063 for name, filterfn in self._datafilters.iteritems():
1064 if cmd.startswith(name):
1064 if cmd.startswith(name):
1065 fn = filterfn
1065 fn = filterfn
1066 params = cmd[len(name):].lstrip()
1066 params = cmd[len(name):].lstrip()
1067 break
1067 break
1068 if not fn:
1068 if not fn:
1069 fn = lambda s, c, **kwargs: util.filter(s, c)
1069 fn = lambda s, c, **kwargs: util.filter(s, c)
1070 # Wrap old filters not supporting keyword arguments
1070 # Wrap old filters not supporting keyword arguments
1071 if not inspect.getargspec(fn)[2]:
1071 if not inspect.getargspec(fn)[2]:
1072 oldfn = fn
1072 oldfn = fn
1073 fn = lambda s, c, **kwargs: oldfn(s, c)
1073 fn = lambda s, c, **kwargs: oldfn(s, c)
1074 l.append((mf, fn, params))
1074 l.append((mf, fn, params))
1075 self.filterpats[filter] = l
1075 self.filterpats[filter] = l
1076 return self.filterpats[filter]
1076 return self.filterpats[filter]
1077
1077
1078 def _filter(self, filterpats, filename, data):
1078 def _filter(self, filterpats, filename, data):
1079 for mf, fn, cmd in filterpats:
1079 for mf, fn, cmd in filterpats:
1080 if mf(filename):
1080 if mf(filename):
1081 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1081 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1082 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1082 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1083 break
1083 break
1084
1084
1085 return data
1085 return data
1086
1086
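    # Illustrative example (not part of the original file): the patterns
    # loaded by _loadfilter() come from hgrc sections named after the
    # filter. A hypothetical configuration such as
    #
    #   [encode]
    #   **.dat = pipe: gzip
    #
    #   [decode]
    #   **.dat = pipe: gunzip
    #
    # would be compiled above into (matcher, fn, params) triples and
    # applied by _filter(): encode rules run when reading working-copy
    # files (wread), decode rules when writing them back (wwrite).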
    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

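    # Example (illustrative, not in the original source): ``flags`` mirrors
    # manifest flags, so a caller could do
    #
    #   repo.wwrite('docs/link', 'target/path', 'l')   # create a symlink
    #   repo.wwrite('bin/tool', data, 'x')             # executable file
    #   repo.wwrite('README', data, '')                # plain file
    #
    # where an empty flags string clears both the symlink and exec bits.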
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup application, but that fails
        # to cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist
                        # when the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
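        # A minimal sketch (illustrative, not part of the original file) of
        # an in-process txnclose hook consuming the file written above. The
        # hook name and registration are assumptions; only HG_TAG_MOVED
        # (exposed to Python hooks as the ``tag_moved`` keyword argument)
        # and the file format are documented in the comment block earlier
        # in this method:
        #
        #   def tagmovedhook(ui, repo, **kwargs):
        #       if kwargs.get('tag_moved') != '1':
        #           return 0  # no tags touched by this transaction
        #       with repo.vfs('changes/tags.changes', 'rb') as fp:
        #           for line in fp:
        #               action, node, name = line.rstrip('\n').split(' ', 2)
        #               ui.write('%s %s is now %s\n' % (action, name, node))
        #       return 0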
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out when closing the
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while the
                # transaction is running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles)
        tr.changes['revs'] = xrange(0, 0)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

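    # A minimal usage sketch (illustrative, not part of the original file):
    # callers are expected to hold the store lock around a transaction,
    # roughly:
    #
    #   with repo.lock():
    #       tr = repo.transaction('my-operation')  # desc is an arbitrary name
    #       try:
    #           # ... write revlogs, move phases, update bookmarks ...
    #           tr.close()    # commit the transaction
    #       finally:
    #           tr.release()  # rolls back if close() was never reached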
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

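    # Lock-ordering sketch (illustrative, not part of the original file):
    # when both locks are needed, take 'wlock' before 'lock', e.g.
    #
    #   with repo.wlock():      # working-copy lock first
    #       with repo.lock():   # then the store lock
    #           ...             # safe to touch store and dirstate
    #
    # Acquiring them in the opposite order risks deadlock against a process
    # that does it correctly; the devel warning in wlock() below exists to
    # catch exactly that mistake.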
1633 def _wlockchecktransaction(self):
1633 def _wlockchecktransaction(self):
1634 if self.currenttransaction() is not None:
1634 if self.currenttransaction() is not None:
1635 raise error.LockInheritanceContractViolation(
1635 raise error.LockInheritanceContractViolation(
1636 'wlock cannot be inherited in the middle of a transaction')
1636 'wlock cannot be inherited in the middle of a transaction')
1637
1637
1638 def wlock(self, wait=True):
1638 def wlock(self, wait=True):
1639 '''Lock the non-store parts of the repository (everything under
1639 '''Lock the non-store parts of the repository (everything under
1640 .hg except .hg/store) and return a weak reference to the lock.
1640 .hg except .hg/store) and return a weak reference to the lock.
1641
1641
1642 Use this before modifying files in .hg.
1642 Use this before modifying files in .hg.
1643
1643
1644 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1644 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1645 'wlock' first to avoid a dead-lock hazard.'''
1645 'wlock' first to avoid a dead-lock hazard.'''
1646 l = self._wlockref and self._wlockref()
1646 l = self._wlockref and self._wlockref()
1647 if l is not None and l.held:
1647 if l is not None and l.held:
1648 l.lock()
1648 l.lock()
1649 return l
1649 return l
1650
1650
1651 # We do not need to check for non-waiting lock acquisition. Such
1651 # We do not need to check for non-waiting lock acquisition. Such
1652 # acquisition would not cause dead-lock as they would just fail.
1652 # acquisition would not cause dead-lock as they would just fail.
1653 if wait and (self.ui.configbool('devel', 'all-warnings')
1653 if wait and (self.ui.configbool('devel', 'all-warnings')
1654 or self.ui.configbool('devel', 'check-locks')):
1654 or self.ui.configbool('devel', 'check-locks')):
1655 if self._currentlock(self._lockref) is not None:
1655 if self._currentlock(self._lockref) is not None:
1656 self.ui.develwarn('"wlock" acquired after "lock"')
1656 self.ui.develwarn('"wlock" acquired after "lock"')
1657
1657
1658 def unlock():
1658 def unlock():
1659 if self.dirstate.pendingparentchange():
1659 if self.dirstate.pendingparentchange():
1660 self.dirstate.invalidate()
1660 self.dirstate.invalidate()
1661 else:
1661 else:
1662 self.dirstate.write(None)
1662 self.dirstate.write(None)
1663
1663
1664 self._filecache['dirstate'].refresh()
1664 self._filecache['dirstate'].refresh()
1665
1665
1666 l = self._lock(self.vfs, "wlock", wait, unlock,
1666 l = self._lock(self.vfs, "wlock", wait, unlock,
1667 self.invalidatedirstate, _('working directory of %s') %
1667 self.invalidatedirstate, _('working directory of %s') %
1668 self.origroot,
1668 self.origroot,
1669 inheritchecker=self._wlockchecktransaction,
1669 inheritchecker=self._wlockchecktransaction,
1670 parentenvvar='HG_WLOCK_LOCKER')
1670 parentenvvar='HG_WLOCK_LOCKER')
1671 self._wlockref = weakref.ref(l)
1671 self._wlockref = weakref.ref(l)
1672 return l
1672 return l
1673
1673
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

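    # Editor's note: an illustrative sketch (not part of the original
    # module) of the copy metadata recorded above when 'foo' was renamed
    # to 'bar'; 'crev' is the filelog node of the copy source:
    #
    #     meta = {"copy": "foo",          # path the file was copied from
    #             "copyrev": hex(crev)}   # filelog revision of that path
    #     # fparent1 is forced to nullid so readers know to consult the
    #     # copy metadata rather than the first parent.
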
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepo.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit):
            # the temporary commit may already have been stripped before
            # the hook is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

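    # Editor's note: a hedged usage sketch (not part of the original
    # module) of the commit() entry point above; the repo, user and
    # message values are assumptions for illustration:
    #
    #     node = repo.commit(text='fix parser', user='alice <a@example.org>',
    #                        match=matchmod.always(repo.root, ''))
    #     if node is None:
    #         pass  # nothing to commit and ui.allowemptycommit was false
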
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit to its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets: if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

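    # Editor's note: an illustrative sketch (not part of the original
    # module) of driving commitctx() with an in-memory context; it assumes
    # context.memctx/memfilectx with their usual signatures, and the paths
    # and contents are made up:
    #
    #     def filectxfn(repo, memctx, path):
    #         return context.memfilectx(repo, path, 'new contents\n')
    #     ctx = context.memctx(repo, (p1node, p2node), 'commit message',
    #                          ['a.txt'], filectxfn, user='alice')
    #     node = repo.commitctx(ctx)
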
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

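    # Editor's note: a minimal sketch (not part of the original module) of
    # an extension registering a post-dirstate-status fixup; the callback
    # name is hypothetical:
    #
    #     def fixup(wctx, status):
    #         dirstate = wctx.repo().dirstate  # never use a cached dirstate
    #         # ... adjust dirstate entries based on 'status' ...
    #     repo.addpostdsstatus(fixup)
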
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            # walk first parents back to a merge or root and record the span
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            # walk first parents from top towards bottom, sampling nodes
            # at exponentially growing distances (1, 2, 4, 8, ...)
            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance; its hooks are called before pushing
        changesets, with a pushop exposing the repo, remote and outgoing.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

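    # Editor's note: an illustrative call (not part of the original module)
    # of the pushkey machinery above, advancing a bookmark; the bookmark
    # name and nodes are assumptions:
    #
    #     ok = repo.pushkey('bookmarks', 'feature', hex(oldnode), hex(newnode))
    #     if not ok:
    #         pass  # a prepushkey hook aborted or the old value mismatched
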
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

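# Editor's note: a hedged sketch (not part of the original module) of how
# the closure returned by aftertrans() is consumed -- it is handed to the
# transaction as its 'after' callback so journal files become undo files
# once the transaction closes; the exact transaction() arguments here are
# assumptions:
#
#     renames = [(repo.svfs, 'journal', 'undo')]
#     tr = transaction.transaction(ui.warn, repo.svfs, vfsmap,
#                                  'journal', 'undo', aftertrans(renames))
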
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2'):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
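
# Editor's note: a minimal sketch (not part of the original module) of an
# extension wrapping newreporequirements(), as the docstring above invites;
# the requirement name is hypothetical:
#
#     def wrapper(orig, repo):
#         requirements = orig(repo)
#         requirements.add('exp-myrequirement')
#         return requirements
#     extensions.wrapfunction(localrepo, 'newreporequirements', wrapper)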