transaction: add a name and a __repr__ implementation (API)...
Martin von Zweigbergk
r36837:aff5996f default
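The file content below is the unchanged localrepo.py context shown by the
changeset viewer; the transaction change named in the commit title lives in
mercurial/transaction.py and is not part of this excerpt. As a minimal,
hypothetical sketch (assuming a 'names' list and 'count'/'usages' counters;
not the actual patch), naming a transaction and giving it a __repr__ could
look like:

    class transaction(object):
        """Sketch only -- not the real mercurial.transaction.transaction."""
        def __init__(self, name='<unnamed>'):
            # transactions can nest, so the names are kept as a list
            self.names = [name]
            self.count = 1
            self.usages = 1

        def __repr__(self):
            name = '/'.join(self.names)
            return ('<transaction name=%s, count=%d, usages=%d>' %
                    (name, self.count, self.usages))

With something along these lines, a live transaction would show up in logs
and debuggers as, e.g., <transaction name=commit, count=1, usages=1> instead
of a bare object address.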
@@ -1,2313 +1,2314
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True
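
# Example usage (illustrative; not part of the original file): code that
# only wants to act on the changelog when it has already been loaded can do
#
#     cl, cached = isfilecached(repo, 'changelog')
#     if cached:
#         ...  # inspect cl without forcing the filecache property to load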

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
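
# Illustrative only (not part of the original file): the decorator is meant
# to be applied to localrepository methods, e.g.
#
#     @unfilteredmethod
#     def destroyed(self):
#         ...
#
# so the body always runs against the unfiltered repository.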

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self._ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    @util.propertycache
    def ui(self):
        return self._ui

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      util.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def iterbatch(self):
        return peer.localiterbatcher(self)

    # End of peer interface.

class locallegacypeer(repository.legacypeer, localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, basenodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

class localrepository(object):

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
    }

    # a list of (ui, featureset) functions.
    # only functions defined in modules of enabled extensions are invoked
    featuresetupfuncs = set()

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        self.auditor = pathutil.pathauditor(
            self.root, callback=self._checknested)
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        self.nofsauditor = pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extension to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @repofilecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)

    @repofilecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if changegroup.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        target = self
        if self.shared():
            from . import hg
            target = hg.sharedreposource(self)
        narrowspec.save(target, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
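
    # Illustrative calls (not part of the original file); formatspec escapes
    # each argument according to its %-code:
    #
    #     repo.revs('%d::%d', 0, 5)                  # %d: integer revision
    #     repo.revs('branch(%s)', 'default')         # %s: string
    #     repo.revs('%ld and not %ld', [1, 2], [2])  # %ld: list of revisions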

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self,
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
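
    # Illustrative call (not part of the original file): hook arguments are
    # passed as keywords and forwarded to the registered hooks, e.g.
    #
    #     repo.hook('pretxncommit', throw=True, node=hex(node))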
865
865
866 @filteredpropertycache
866 @filteredpropertycache
867 def _tagscache(self):
867 def _tagscache(self):
868 '''Returns a tagscache object that contains various tags related
868 '''Returns a tagscache object that contains various tags related
869 caches.'''
869 caches.'''
870
870
871 # This simplifies its cache management by having one decorated
871 # This simplifies its cache management by having one decorated
872 # function (this one) and the rest simply fetch things from it.
872 # function (this one) and the rest simply fetch things from it.
873 class tagscache(object):
873 class tagscache(object):
874 def __init__(self):
874 def __init__(self):
875 # These two define the set of tags for this repository. tags
875 # These two define the set of tags for this repository. tags
876 # maps tag name to node; tagtypes maps tag name to 'global' or
876 # maps tag name to node; tagtypes maps tag name to 'global' or
877 # 'local'. (Global tags are defined by .hgtags across all
877 # 'local'. (Global tags are defined by .hgtags across all
878 # heads, and local tags are defined in .hg/localtags.)
878 # heads, and local tags are defined in .hg/localtags.)
879 # They constitute the in-memory cache of tags.
879 # They constitute the in-memory cache of tags.
880 self.tags = self.tagtypes = None
880 self.tags = self.tagtypes = None
881
881
882 self.nodetagscache = self.tagslist = None
882 self.nodetagscache = self.tagslist = None
883
883
884 cache = tagscache()
884 cache = tagscache()
885 cache.tags, cache.tagtypes = self._findtags()
885 cache.tags, cache.tagtypes = self._findtags()
886
886
887 return cache
887 return cache
888
888
889 def tags(self):
889 def tags(self):
890 '''return a mapping of tag to node'''
890 '''return a mapping of tag to node'''
891 t = {}
891 t = {}
892 if self.changelog.filteredrevs:
892 if self.changelog.filteredrevs:
893 tags, tt = self._findtags()
893 tags, tt = self._findtags()
894 else:
894 else:
895 tags = self._tagscache.tags
895 tags = self._tagscache.tags
896 for k, v in tags.iteritems():
896 for k, v in tags.iteritems():
897 try:
897 try:
898 # ignore tags to unknown nodes
898 # ignore tags to unknown nodes
899 self.changelog.rev(v)
899 self.changelog.rev(v)
900 t[k] = v
900 t[k] = v
901 except (error.LookupError, ValueError):
901 except (error.LookupError, ValueError):
902 pass
902 pass
903 return t
903 return t
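    # Note: the result always includes 'tip'; e.g. repo.tags()['tip'] is
    # repo.changelog.tip() (set in _findtags() below).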

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

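        # the next line is an old-style conditional expression: use `remote`
        # only when it exists and is a local repo, else fall back to `self`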
        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result
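    # Sketch (hypothetical nodes): if node1 is present and node2 is missing
    # or filtered, repo.known([node1, node2]) returns [True, False].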

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
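    # A hypothetical hgrc entry wired through the two helpers above (a
    # sketch, not from this file):
    #   [encode]
    #   *.txt = tempfile: dos2unix INFILE OUTFILE
    # _loadfilter('encode') compiles the pattern list, and _filter() applies
    # the first matching command to the file data.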

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
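        # manifest-style flags: 'l' marks a symlink, 'x' an executable file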
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None
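    # Callers often prefer an existing transaction before opening a new one,
    # e.g. (illustrative):
    #   tr = repo.currenttransaction() or repo.transaction('some-operation')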

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
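        # the id is effectively random; hooks see it as HG_TXNID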
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature, so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['revs'] = xrange(0, 0)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
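    # journal.desc holds "<changelog length>\n<desc>\n"; aftertrans() renames
    # the journal.* files to undo.*, which _rollback() parses later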

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
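    # Illustrative undo.desc content: "42\ncommit\n" rolls the tip back to
    # revision 41 (oldlen - 1) and reports "(undo commit)"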

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # a later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
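    # A sketch of the documented acquisition order (illustrative only):
    #   with repo.wlock():
    #       with repo.lock():
    #           with repo.transaction('example') as tr:
    #               pass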
1669
1670
1670 def _wlockchecktransaction(self):
1671 def _wlockchecktransaction(self):
1671 if self.currenttransaction() is not None:
1672 if self.currenttransaction() is not None:
1672 raise error.LockInheritanceContractViolation(
1673 raise error.LockInheritanceContractViolation(
1673 'wlock cannot be inherited in the middle of a transaction')
1674 'wlock cannot be inherited in the middle of a transaction')
1674
1675
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

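    # Editor's illustrative sketch (not part of the original file): a minimal
    # call into commit() below; it returns the new node, or None when there
    # is nothing to commit:
    #
    #     node = repo.commit(text='fix the frobnicator', user='alice')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')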
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may get stripped before the hook is run
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meantime. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

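    # Editor's note (not part of the original file): the next two methods
    # serve the legacy wire-protocol discovery commands. branches() walks
    # each given node down its first-parent chain until it hits a merge or
    # a root, reporting (tip, branchpoint, p0, p1) for each.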
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

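    # Editor's note (not part of the original file): between() samples the
    # first-parent chain for each (top, bottom) pair at exponentially growing
    # distances from top (1, 2, 4, ... steps), stopping at bottom or the
    # root, which keeps the sampled list logarithmic in the range length.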
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose callbacks receive a pushop
        (carrying repo, remote and outgoing) and are called before pushing
        changesets.
        """
        return util.hooks()

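    # Editor's illustrative sketch (not part of the original file): an
    # extension would register a prepush check roughly like
    #
    #     def checkoutgoing(pushop):
    #         pushop.repo.ui.note('checking outgoing changesets\n')
    #     repo.prepushoutgoinghooks.add('myextension', checkoutgoing)
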
    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename cannot be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
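
# Editor's illustrative sketch (not part of the original file): an extension
# would typically customize the requirements by wrapping the function above:
#
#     def wrapped(orig, repo):
#         requirements = orig(repo)
#         requirements.add('myext-v1')
#         return requirements
#     extensions.wrapfunction(localrepo, 'newreporequirements', wrapped)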
@@ -1,629 +1,639
# transaction.py - simple journaling scheme for mercurial
#
# This transaction scheme is intended to gracefully handle program
# errors and interruptions. More serious failures like system crashes
# can be recovered with an fsck-like tool. As the whole repository is
# effectively log-structured, this should amount to simply truncating
# anything that isn't referenced in the changelog.
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno

from .i18n import _
from . import (
    error,
    pycompat,
    util,
)

version = 2

# These are the file generators that should only be executed after the
# finalizers are done, since they rely on the output of the finalizers (like
# the changelog having been written).
postfinalizegenerators = {
    'bookmarks',
    'dirstate'
}

gengroupall='all'
gengroupprefinalize='prefinalize'
gengrouppostfinalize='postfinalize'

def active(func):
    def _active(self, *args, **kwds):
        if self.count == 0:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwds)
    return _active

def _playback(journal, report, opener, vfsmap, entries, backupentries,
              unlink=True, checkambigfiles=None):
    for f, o, _ignore in entries:
        if o or not unlink:
            checkambig = checkambigfiles and (f, '') in checkambigfiles
            try:
                fp = opener(f, 'a', checkambig=checkambig)
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_("failed to truncate %s\n") % f)
                raise
        else:
            try:
                opener.unlink(f)
            except (IOError, OSError) as inst:
                if inst.errno != errno.ENOENT:
                    raise

    backupfiles = []
    for l, f, b, c in backupentries:
        if l not in vfsmap and c:
            report("couldn't handle %s: unknown cache location %s\n"
                   % (b, l))
        vfs = vfsmap[l]
        try:
            if f and b:
                filepath = vfs.join(f)
                backuppath = vfs.join(b)
                checkambig = checkambigfiles and (f, l) in checkambigfiles
                try:
                    util.copyfile(backuppath, filepath, checkambig=checkambig)
                    backupfiles.append(b)
                except IOError:
                    report(_("failed to recover %s\n") % f)
            else:
                target = f or b
                try:
                    vfs.unlink(target)
                except (IOError, OSError) as inst:
                    if inst.errno != errno.ENOENT:
                        raise
        except (IOError, OSError, error.Abort) as inst:
            if not c:
                raise

    backuppath = "%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for f in backupfiles:
            if opener.exists(f):
                opener.unlink(f)
    except (IOError, OSError, error.Abort) as inst:
        # only the backup files remain; it is safe to ignore any error
        pass

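# Editor's note (not part of the original file): each journal line is a
# '<file>\0<offset>\n' record (written by _addentry below), which gives
# _playback enough information to truncate every touched append-only file.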
class transaction(util.transactional):
    def __init__(self, report, opener, vfsmap, journalname, undoname=None,
                 after=None, createmode=None, validator=None, releasefn=None,
-                checkambigfiles=None):
+                checkambigfiles=None, name=r'<unnamed>'):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event
        of an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponding files.
        """
        self.count = 1
        self.usages = 1
        self.report = report
        # a vfs to the store content
        self.opener = opener
        # a map to access files in various locations {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener # set default value
        self._vfsmap = vfsmap
        self.after = after
        self.entries = []
        self.map = {}
        self.journal = journalname
        self.undoname = undoname
        self._queue = []
        # A callback to validate transaction content before closing it.
        # It should raise an exception if anything is wrong.
        # target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self.validator = validator
        # A callback to do something just after releasing the transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self.releasefn = releasefn

        self.checkambigfiles = set()
        if checkambigfiles:
            self.checkambigfiles.update(checkambigfiles)

+       self.names = [name]
+
        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self.file = opener.open(self.journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % self.journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # holds file generations to be performed on commit
        self._filegenerators = {}
        # holds callbacks to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data has ever been written
        self._anypending = False
        # holds callbacks to call when writing the transaction
        self._finalizecallback = {}
        # holds callbacks for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

+    def __repr__(self):
+        name = r'/'.join(self.names)
+        return (r'<transaction name=%s, count=%d, usages=%d>' %
+                (name, self.count, self.usages))
+
    def __del__(self):
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entries

        This is used by strip to delay visibility of the strip offset. The
        transaction sees either none or all of the strip actions to be
        done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entries.

        This is used by strip to delay visibility of the strip offset. The
        transaction sees either none or all of the strip actions to be
        done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)
217
211 @active
218 @active
212 def add(self, file, offset, data=None):
219 def add(self, file, offset, data=None):
213 """record the state of an append-only file before update"""
220 """record the state of an append-only file before update"""
214 if file in self.map or file in self._backupmap:
221 if file in self.map or file in self._backupmap:
215 return
222 return
216 if self._queue:
223 if self._queue:
217 self._queue[-1].append((file, offset, data))
224 self._queue[-1].append((file, offset, data))
218 return
225 return
219
226
220 self._addentry(file, offset, data)
227 self._addentry(file, offset, data)
221
228
    def _addentry(self, file, offset, data):
        """add an append-only entry to memory and on-disk state"""
        if file in self.map or file in self._backupmap:
            return
        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

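    # Journal format note (illustrative; values made up): each record is a
    # NUL-separated path/offset pair terminated by a newline, e.g.
    #
    #   b"data/foo.i\x001024\n"
    #
    # which is enough for rollback to truncate 'data/foo.i' back to 1024
    # bytes.
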
    @active
    def addbackup(self, file, hardlink=True, location=''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if file in self.map or file in self._backupmap:
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = "%s.backup.%s" % (self.journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            backupfile = ''

        self._addbackupentry((location, file, backupfile, False))

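    # Naming sketch (hedged; paths hypothetical): with journal 'journal' and
    # file 'phaseroots', the backup lands next to the original as
    # 'journal.backup.phaseroots' and is recorded so an abort can restore it:
    #
    #   tr.addbackup('phaseroots')
    #   tr._backupentries[-1]
    #   # ('', 'phaseroots', 'journal.backup.phaseroots', False)
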
    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._addbackupentry((location, '', tmpfile, False))

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0,
                         location=''):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content for each entry in the `filenames` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entry in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such files.

        The `genid` argument is used to ensure the same set of files is only
        generated once. A call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which
        multiple generators will be executed.

        The `location` argument may be used to indicate that the files are
        located outside of the standard directory for transactions. It should
        match one of the keys of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom
        # vfs, except for bookmarks, which are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

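    # Usage sketch (hedged; function and content hypothetical): a caller
    # could register a generator that rewrites the bookmarks file when the
    # transaction closes:
    #
    #   def writebookmarks(fp):
    #       fp.write(b'0123456789abcdef mybook\n')   # made-up content
    #   tr.addfilegenerator('bookmarks', ('bookmarks',), writebookmarks,
    #                       location='plain')
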
    @active
    def removefilegenerator(self, genid):
        """reverse of addfilegenerator, remove a file generator function"""
        if genid in self._filegenerators:
            del self._filegenerators[genid]

    def _generatefiles(self, suffix='', group=gengroupall):
        # write files registered for generation
        any = False
        for id, entry in sorted(self._filegenerators.iteritems()):
            any = True
            order, filenames, genfunc, location = entry

            # for generation at closing, check if it's before or after
            # finalize
            postfinalize = group == gengrouppostfinalize
            if (group != gengroupall and
                (id in postfinalizegenerators) != (postfinalize)):
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        self.registertmp(name, location=location)
                        checkambig = False
                    else:
                        self.addbackup(name, location=location)
                        checkambig = (name, location) in self.checkambigfiles
                    files.append(vfs(name, 'w', atomictemp=True,
                                     checkambig=checkambig))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()
        return any

    @active
    def find(self, file):
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self, name=r'<unnamed>'):
        self.count += 1
        self.usages += 1
        self.names.append(name)
        return self

    def release(self):
        if self.count > 0:
            self.usages -= 1
        if self.names:
            self.names.pop()
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        return self.count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending files to temporary versions

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix='.pending')
        return self._anypending

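    # Pending-protocol sketch (hedged; callback is hypothetical): a callback
    # registered with addpending() writes a '<file>.pending' variant and
    # returns True if it wrote anything, so pretxn hooks can see the
    # uncommitted state:
    #
    #   def writependingphases(tr):
    #       ...   # write 'phaseroots.pending'
    #       return True
    #   tr.addpending('phases', writependingphases)
    #   tr.writepending()   # True once any callback reports pending data
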
    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks
        with newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def getpostclose(self, category):
        """return a postclose callback added before, or None"""
        return self._postclosecallback.get(category, None)

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

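    # Lifecycle sketch (hedged; 'x' and cb are placeholders): the callback
    # families fire at distinct points of the transaction:
    #
    #   tr.addpending('x', cb)     # when hooks need a pre-commit view
    #   tr.addfinalize('x', cb)    # while closing, before files are final
    #   tr.addpostclose('x', cb)   # after a successful close()
    #   tr.addabort('x', cb)       # during _abort(), before playback
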
    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1:
            self.validator(self)  # will raise exception if needed
            self.validator = None  # Help prevent cycles.
            self._generatefiles(group=gengroupprefinalize)
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=gengrouppostfinalize)

        self.count -= 1
        if self.count != 0:
            return
        self.file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location %s\n"
                            % (b, l))
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self.entries = []
        self._writeundo()
        if self.after:
            self.after()
            self.after = None  # Help prevent cycles.
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location "
                            "%s\n" % (b, l))
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self._backupentries = []
        self.journal = None

        self.releasefn(self, True)  # notify success of closing transaction
        self.releasefn = None  # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self.undoname is None:
            return
        undobackupfile = self.opener.open("%s.backupfiles" % self.undoname, 'w')
        undobackupfile.write('%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                u = ''
            else:
                if l not in self._vfsmap and c:
                    self.report("couldn't remove %s: unknown cache location "
                                "%s\n" % (b, l))
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self.journal), name
                uname = name.replace(self.journal, self.undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()
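
    # Renaming sketch (hedged; paths hypothetical): with journal 'journal'
    # and undoname 'undo', a backup 'journal.backup.phaseroots' is copied to
    # 'undo.backup.phaseroots' so that a later 'hg rollback' can replay it.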

    def _abort(self):
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        try:
            if not self.entries and not self._backupentries:
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                if self.journal:
                    self.opener.unlink(self.journal)
                return

            self.report(_("transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(self.journal, self.report, self.opener, self._vfsmap,
                          self.entries, self._backupentries, False,
                          checkambigfiles=self.checkambigfiles)
                self.report(_("rollback completed\n"))
            except BaseException:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
            self.releasefn(self, False)  # notify failure of transaction
            self.releasefn = None  # Help prevent cycles.

def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determines whether file stat ambiguity should be avoided
    when restoring the corresponding files.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(
                _("couldn't read journal entry %r!\n") % pycompat.bytestr(l))

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        if lines:
            ver = lines[0][:-1]
            if ver == (b'%d' % version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split('\0')
                        backupentries.append((l, f, b, bool(c)))
            else:
                report(_("journal was created by a different version of "
                         "Mercurial\n"))

    _playback(file, report, opener, vfsmap, entries, backupentries,
              checkambigfiles=checkambigfiles)
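
# Format sketch (hedged; values made up): the two files consumed by
# rollback() could look like this, with the version constant on the first
# line of the backup journal:
#
#   journal:              b"data/foo.i\x001024\n"
#   journal.backupfiles:  b"2\n"
#                         b"\x00phaseroots\x00journal.backup.phaseroots\x000\n"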