transaction: avoid file stat ambiguity only for files in blacklist...
FUJIWARA Katsunori
r33278:87bca10a default
@@ -1,2128 +1,2131 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    pycompat,
    repoview,
    revset,
    revsetlang,
    scmutil,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq
# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()
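# For example, the decorators below register entries such as
# ('bookmarks', 'plain') via repofilecache and ('phaseroots', '') via
# storecache.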

class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)
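# A brief illustration of the difference (based on the decorated properties
# defined later in this module): repofilecache('dirstate') stats .hg/dirstate
# through obj.vfs.join, while storecache('00changelog.i') stats
# .hg/store/00changelog.i through obj.sjoin.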

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always needs to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
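# Hedged usage sketch (the method name below is hypothetical; the pattern
# matches the @unfilteredmethod use on revbranchcache later in this module):
#
#   @unfilteredmethod
#   def _rebuildcaches(self):
#       # `self` here is repo.unfiltered(), so filtered revisions are
#       # visible to the wrapped function
#       ...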

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        if caps is None:
            caps = moderncaps.copy()
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

class localrepository(object):

    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
        REVLOGV2_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
    }

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # These auditors are not used by the vfs,
        # only used when writing this comment: basectx.match
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = vfsmod.vfs(self.path)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, vfsmod.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan', -1)
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True
474 def _writerequirements(self):
474 def _writerequirements(self):
475 scmutil.writerequires(self.vfs, self.requirements)
475 scmutil.writerequires(self.vfs, self.requirements)
476
476
477 def _checknested(self, path):
477 def _checknested(self, path):
478 """Determine if path is a legal nested repository."""
478 """Determine if path is a legal nested repository."""
479 if not path.startswith(self.root):
479 if not path.startswith(self.root):
480 return False
480 return False
481 subpath = path[len(self.root) + 1:]
481 subpath = path[len(self.root) + 1:]
482 normsubpath = util.pconvert(subpath)
482 normsubpath = util.pconvert(subpath)
483
483
484 # XXX: Checking against the current working copy is wrong in
484 # XXX: Checking against the current working copy is wrong in
485 # the sense that it can reject things like
485 # the sense that it can reject things like
486 #
486 #
487 # $ hg cat -r 10 sub/x.txt
487 # $ hg cat -r 10 sub/x.txt
488 #
488 #
489 # if sub/ is no longer a subrepository in the working copy
489 # if sub/ is no longer a subrepository in the working copy
490 # parent revision.
490 # parent revision.
491 #
491 #
492 # However, it can of course also allow things that would have
492 # However, it can of course also allow things that would have
493 # been rejected before, such as the above cat command if sub/
493 # been rejected before, such as the above cat command if sub/
494 # is a subrepository now, but was a normal directory before.
494 # is a subrepository now, but was a normal directory before.
495 # The old path auditor would have rejected by mistake since it
495 # The old path auditor would have rejected by mistake since it
496 # panics when it sees sub/.hg/.
496 # panics when it sees sub/.hg/.
497 #
497 #
498 # All in all, checking against the working copy seems sensible
498 # All in all, checking against the working copy seems sensible
499 # since we want to prevent access to nested repositories on
499 # since we want to prevent access to nested repositories on
500 # the filesystem *now*.
500 # the filesystem *now*.
501 ctx = self[None]
501 ctx = self[None]
502 parts = util.splitpath(subpath)
502 parts = util.splitpath(subpath)
503 while parts:
503 while parts:
504 prefix = '/'.join(parts)
504 prefix = '/'.join(parts)
505 if prefix in ctx.substate:
505 if prefix in ctx.substate:
506 if prefix == normsubpath:
506 if prefix == normsubpath:
507 return True
507 return True
508 else:
508 else:
509 sub = ctx.sub(prefix)
509 sub = ctx.sub(prefix)
510 return sub.checknested(subpath[len(prefix) + 1:])
510 return sub.checknested(subpath[len(prefix) + 1:])
511 else:
511 else:
512 parts.pop()
512 parts.pop()
513 return False
513 return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class filteredrepo(repoview.repoview, self.unfiltered().__class__):
            pass
        return filteredrepo(self, name)
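    # Hedged usage sketch: the filter names used elsewhere in this module
    # are repoview names such as 'served' (see localpeer.__init__ above)
    # and 'visible' (see cancopy below), e.g.:
    #
    #   served = repo.filtered('served')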

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
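    # Hedged illustration of the lookup forms handled above: repo[None]
    # yields a workingctx, repo['tip'] or repo[0] a changectx, and a slice
    # such as repo[0:3] a list of changectx objects with filtered revisions
    # skipped.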

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
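    # Hedged usage sketch of the %-formatting mentioned above (the argument
    # values are hypothetical):
    #
    #   repo.revs('ancestors(%d)', 42)          # %d escapes an integer rev
    #   repo.revs('branch(%s) and %r', 'default', 'head()')
    #                                           # %s a string, %r a revset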

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self)
        else:
            m = revset.matchany(None, specs)
        return m(self)
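    # Hedged example (the alias name is hypothetical): with user=True,
    # aliases from the user's [revsetalias] configuration are honored:
    #
    #   repo.anyrevs(['heads(default)', 'myalias()'], user=True)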

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)
883
883
884 def filectx(self, path, changeid=None, fileid=None):
884 def filectx(self, path, changeid=None, fileid=None):
885 """changeid can be a changeset revision, node, or tag.
885 """changeid can be a changeset revision, node, or tag.
886 fileid can be a file revision or node."""
886 fileid can be a file revision or node."""
887 return context.filectx(self, path, changeid, fileid)
887 return context.filectx(self, path, changeid, fileid)
888
888
889 def getcwd(self):
889 def getcwd(self):
890 return self.dirstate.getcwd()
890 return self.dirstate.getcwd()
891
891
892 def pathto(self, f, cwd=None):
892 def pathto(self, f, cwd=None):
893 return self.dirstate.pathto(f, cwd)
893 return self.dirstate.pathto(f, cwd)
894
894
895 def _loadfilter(self, filter):
895 def _loadfilter(self, filter):
896 if filter not in self.filterpats:
896 if filter not in self.filterpats:
897 l = []
897 l = []
898 for pat, cmd in self.ui.configitems(filter):
898 for pat, cmd in self.ui.configitems(filter):
899 if cmd == '!':
899 if cmd == '!':
900 continue
900 continue
901 mf = matchmod.match(self.root, '', [pat])
901 mf = matchmod.match(self.root, '', [pat])
902 fn = None
902 fn = None
903 params = cmd
903 params = cmd
904 for name, filterfn in self._datafilters.iteritems():
904 for name, filterfn in self._datafilters.iteritems():
905 if cmd.startswith(name):
905 if cmd.startswith(name):
906 fn = filterfn
906 fn = filterfn
907 params = cmd[len(name):].lstrip()
907 params = cmd[len(name):].lstrip()
908 break
908 break
909 if not fn:
909 if not fn:
910 fn = lambda s, c, **kwargs: util.filter(s, c)
910 fn = lambda s, c, **kwargs: util.filter(s, c)
911 # Wrap old filters not supporting keyword arguments
911 # Wrap old filters not supporting keyword arguments
912 if not inspect.getargspec(fn)[2]:
912 if not inspect.getargspec(fn)[2]:
913 oldfn = fn
913 oldfn = fn
914 fn = lambda s, c, **kwargs: oldfn(s, c)
914 fn = lambda s, c, **kwargs: oldfn(s, c)
915 l.append((mf, fn, params))
915 l.append((mf, fn, params))
916 self.filterpats[filter] = l
916 self.filterpats[filter] = l
917 return self.filterpats[filter]
917 return self.filterpats[filter]
918
918
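    # An illustrative hgrc snippet that _loadfilter above would consume; the
    # section name ('encode' or 'decode') is the ``filter`` argument. The
    # patterns and commands below are placeholders, not recommendations:
    # plain commands are run as shell pipes via util.filter, while names
    # registered through adddatafilter (e.g. win32text's "cleverencode:")
    # are matched by prefix instead.
    #
    #     [encode]
    #     **.txt = dos2unix
    #     [decode]
    #     **.txt = unix2dos
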
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

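    # Illustrative call (hypothetical path and data): write an executable
    # script back into the working directory through the decode filters:
    #
    #     repo.wwrite('bin/run.sh', data, 'x')
    #
    # An empty flags string writes a regular file; 'l' writes a symlink.
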
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
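        # For example, one line of that file could look like this (the node
        # below is an illustrative placeholder, not a real hash):
        #
        #   +A 0123456789abcdef0123456789abcdef01234567 release-1.0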
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
                                             False)
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles)
        tr.changes['revs'] = set()
        tr.changes['obsmarkers'] = set()

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

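    # Illustrative usage sketch (hypothetical description string): callers
    # take the store lock before opening a transaction, and must close or
    # release it explicitly:
    #
    #     with repo.lock():
    #         tr = repo.transaction('my-operation')
    #         try:
    #             ...            # write to the store
    #             tr.close()     # commit the transaction
    #         finally:
    #             tr.release()   # abort if close() was never reached
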
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

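    # Illustrative lock-ordering sketch (per the docstrings above): when a
    # caller needs both locks, it acquires 'wlock' before 'lock', e.g.
    #
    #     with repo.wlock():
    #         with repo.lock():
    #             ...  # both store and working-directory writes are safe here
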
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

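    # Illustrative shape of the rename metadata built above (the hash is a
    # made-up placeholder): a renamed file's new filelog revision would carry
    #
    #     meta = {'copy': 'foo',
    #             'copyrev': '0123456789abcdef0123456789abcdef01234567'}
    #
    # with its first filelog parent set to nullid, as described in the
    # comments of _filecommit.
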
1568 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1571 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1569 """check for commit arguments that aren't committable"""
1572 """check for commit arguments that aren't committable"""
1570 if match.isexact() or match.prefix():
1573 if match.isexact() or match.prefix():
1571 matched = set(status.modified + status.added + status.removed)
1574 matched = set(status.modified + status.added + status.removed)
1572
1575
1573 for f in match.files():
1576 for f in match.files():
1574 f = self.dirstate.normalize(f)
1577 f = self.dirstate.normalize(f)
1575 if f == '.' or f in matched or f in wctx.substate:
1578 if f == '.' or f in matched or f in wctx.substate:
1576 continue
1579 continue
1577 if f in status.deleted:
1580 if f in status.deleted:
1578 fail(f, _('file not found!'))
1581 fail(f, _('file not found!'))
1579 if f in vdirs: # visited directory
1582 if f in vdirs: # visited directory
1580 d = f + '/'
1583 d = f + '/'
1581 for mf in matched:
1584 for mf in matched:
1582 if mf.startswith(d):
1585 if mf.startswith(d):
1583 break
1586 break
1584 else:
1587 else:
1585 fail(f, _("no match under directory!"))
1588 fail(f, _("no match under directory!"))
1586 elif f not in self.dirstate:
1589 elif f not in self.dirstate:
1587 fail(f, _("file not tracked!"))
1590 fail(f, _("file not tracked!"))
1588
1591
1589 @unfilteredmethod
1592 @unfilteredmethod
1590 def commit(self, text="", user=None, date=None, match=None, force=False,
1593 def commit(self, text="", user=None, date=None, match=None, force=False,
1591 editor=False, extra=None):
1594 editor=False, extra=None):
1592 """Add a new revision to current repository.
1595 """Add a new revision to current repository.
1593
1596
1594 Revision information is gathered from the working directory,
1597 Revision information is gathered from the working directory,
1595 match can be used to filter the committed files. If editor is
1598 match can be used to filter the committed files. If editor is
1596 supplied, it is called to get a commit message.
1599 supplied, it is called to get a commit message.
1597 """
1600 """
1598 if extra is None:
1601 if extra is None:
1599 extra = {}
1602 extra = {}
1600
1603
1601 def fail(f, msg):
1604 def fail(f, msg):
1602 raise error.Abort('%s: %s' % (f, msg))
1605 raise error.Abort('%s: %s' % (f, msg))
1603
1606
1604 if not match:
1607 if not match:
1605 match = matchmod.always(self.root, '')
1608 match = matchmod.always(self.root, '')
1606
1609
1607 if not force:
1610 if not force:
1608 vdirs = []
1611 vdirs = []
1609 match.explicitdir = vdirs.append
1612 match.explicitdir = vdirs.append
1610 match.bad = fail
1613 match.bad = fail
1611
1614
1612 wlock = lock = tr = None
1615 wlock = lock = tr = None
1613 try:
1616 try:
1614 wlock = self.wlock()
1617 wlock = self.wlock()
1615 lock = self.lock() # for recent changelog (see issue4368)
1618 lock = self.lock() # for recent changelog (see issue4368)
1616
1619
1617 wctx = self[None]
1620 wctx = self[None]
1618 merge = len(wctx.parents()) > 1
1621 merge = len(wctx.parents()) > 1
1619
1622
1620 if not force and merge and not match.always():
1623 if not force and merge and not match.always():
1621 raise error.Abort(_('cannot partially commit a merge '
1624 raise error.Abort(_('cannot partially commit a merge '
1622 '(do not specify files or patterns)'))
1625 '(do not specify files or patterns)'))
1623
1626
1624 status = self.status(match=match, clean=force)
1627 status = self.status(match=match, clean=force)
1625 if force:
1628 if force:
1626 status.modified.extend(status.clean) # mq may commit clean files
1629 status.modified.extend(status.clean) # mq may commit clean files
1627
1630
1628 # check subrepos
1631 # check subrepos
1629 subs = []
1632 subs = []
1630 commitsubs = set()
1633 commitsubs = set()
1631 newstate = wctx.substate.copy()
1634 newstate = wctx.substate.copy()
1632 # only manage subrepos and .hgsubstate if .hgsub is present
1635 # only manage subrepos and .hgsubstate if .hgsub is present
1633 if '.hgsub' in wctx:
1636 if '.hgsub' in wctx:
1634 # we'll decide whether to track this ourselves, thanks
1637 # we'll decide whether to track this ourselves, thanks
1635 for c in status.modified, status.added, status.removed:
1638 for c in status.modified, status.added, status.removed:
1636 if '.hgsubstate' in c:
1639 if '.hgsubstate' in c:
1637 c.remove('.hgsubstate')
1640 c.remove('.hgsubstate')
1638
1641
1639 # compare current state to last committed state
1642 # compare current state to last committed state
1640 # build new substate based on last committed state
1643 # build new substate based on last committed state
1641 oldstate = wctx.p1().substate
1644 oldstate = wctx.p1().substate
1642 for s in sorted(newstate.keys()):
1645 for s in sorted(newstate.keys()):
1643 if not match(s):
1646 if not match(s):
1644 # ignore working copy, use old state if present
1647 # ignore working copy, use old state if present
1645 if s in oldstate:
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit);
            # the temporary commit may already have been stripped before
            # the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

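The hook ordering above is deliberate: 'precommit' fires before the transaction is opened, 'pretxncommit' fires inside it (so a failure rolls the commit back), and 'commit' is deferred until after lock release via _afterlock. A hedged sketch of in-process Python hooks observing that ordering (hook names follow hg's hooks configuration; the bodies are illustrative, not part of this changeset):

def precommithook(ui, repo, parent1=None, parent2=None, **kwargs):
    # before the 'commit' transaction is opened
    ui.note('committing on top of %s\n' % parent1)

def pretxncommithook(ui, repo, node=None, **kwargs):
    # inside the transaction; a truthy return value aborts and rolls back
    return not repo[node].description().strip()

def commithook(ui, repo, node=None, **kwargs):
    # after the locks are released, via _afterlock above
    ui.status('created changeset %s\n' % node[:12])
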
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter any parent
                # changeset; if a parent already has a higher phase, the
                # resulting phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

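commitctx pins every filelog and manifest revision it writes to linkrev = len(self), the revision number the pending changeset will occupy once the changelog entry lands. A hedged illustration of the resulting invariant; 'foo' and fnode (the node _filecommit returned for that file) are hypothetical:

rev = len(repo)           # rev the in-flight changeset will occupy
fl = repo.file('foo')     # filelog holding the revisions of 'foo'
# once the commit completes, the file revision written above points
# back at its changeset:
assert fl.linkrev(fl.rev(fnode)) == rev
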
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

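These two methods are meant to bracket any history-destroying operation. A hedged sketch of the calling convention, mirroring in greatly simplified form what strip and rollback do:

lock = repo.lock()
try:
    repo.destroying()   # flush pending in-memory state (e.g. phasecache)
    # ... destructive work: truncate revlogs, drop nodes ...
    repo.destroyed()    # filter caches, refresh them, invalidate the repo
finally:
    lock.release()
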
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which
        status fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

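A minimal sketch of the contract documented above, assuming extension code that already holds a repo object (the callback is illustrative):

def fixup(wctx, status):
    # runs under wlock once status completes; always go through
    # wctx.repo().dirstate rather than a cached dirstate reference
    wctx.repo().ui.debug('%d files modified\n' % len(status.modified))

repo.addpostdsstatus(fixup)   # must be re-registered for each status run
repo[None].status()           # triggers dirstate.status plus the fixups
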
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

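between() walks first parents from top toward bottom, recording a node whenever the step counter i equals f and then doubling f, so it samples ancestors 1, 2, 4, 8, ... steps below top. A self-contained simulation of which depths get recorded (illustrative, not repository code):

def sampled_depths(distance):
    depths, f = [], 1
    for i in range(distance):   # i: parent steps already taken from top
        if i == f:
            depths.append(i)    # the node i steps below top is recorded
            f *= 2
    return depths

print(sampled_depths(20))       # -> [1, 2, 4, 8, 16]
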
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object; its hooks are called with a pushop
        carrying repo, remote and outgoing, before changesets are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

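A hedged sketch of driving this machinery directly from extension code; 'bookmarks' is the classic pushkey namespace, keyed by bookmark name with hex nodes as values (the bookmark name is illustrative):

old = repo.listkeys('bookmarks').get('stable', '')
ok = repo.pushkey('bookmarks', 'stable', old, repo['tip'].hex())
if not ok:
    repo.ui.warn('prepushkey hook rejected the update\n')
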
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
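Since the docstring invites wrapping, here is a hedged extension sketch that adds a custom requirement when a config knob is set (the extension, option and requirement names are hypothetical):

from mercurial import extensions, localrepo

def _newreporequirements(orig, repo):
    requirements = orig(repo)
    if repo.ui.configbool('myext', 'enabled'):
        requirements.add('exp-myext')
    return requirements

def extsetup(ui):
    extensions.wrapfunction(localrepo, 'newreporequirements',
                            _newreporequirements)
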
@@ -1,618 +1,634 @@
# transaction.py - simple journaling scheme for mercurial
#
# This transaction scheme is intended to gracefully handle program
# errors and interruptions. More serious failures like system crashes
# can be recovered with an fsck-like tool. As the whole repository is
# effectively log-structured, this should amount to simply truncating
# anything that isn't referenced in the changelog.
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno

from .i18n import _
from . import (
    error,
    util,
)

version = 2

# These are the file generators that should only be executed after the
# finalizers are done, since they rely on the output of the finalizers (like
# the changelog having been written).
postfinalizegenerators = {
    'bookmarks',
    'dirstate'
}

gengroupall = 'all'
gengroupprefinalize = 'prefinalize'
gengrouppostfinalize = 'postfinalize'

def active(func):
    def _active(self, *args, **kwds):
        if self.count == 0:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwds)
    return _active

def _playback(journal, report, opener, vfsmap, entries, backupentries,
-             unlink=True):
+             unlink=True, checkambigfiles=None):
    for f, o, _ignore in entries:
        if o or not unlink:
+           checkambig = checkambigfiles and (f, '') in checkambigfiles
            try:
-               fp = opener(f, 'a', checkambig=True)
+               fp = opener(f, 'a', checkambig=checkambig)
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_("failed to truncate %s\n") % f)
                raise
        else:
            try:
                opener.unlink(f)
            except (IOError, OSError) as inst:
                if inst.errno != errno.ENOENT:
                    raise

    backupfiles = []
    for l, f, b, c in backupentries:
        if l not in vfsmap and c:
            report("couldn't handle %s: unknown cache location %s\n"
                   % (b, l))
        vfs = vfsmap[l]
        try:
            if f and b:
                filepath = vfs.join(f)
                backuppath = vfs.join(b)
                try:
                    util.copyfile(backuppath, filepath, checkambig=True)
                    backupfiles.append(b)
                except IOError:
                    report(_("failed to recover %s\n") % f)
            else:
                target = f or b
                try:
                    vfs.unlink(target)
                except (IOError, OSError) as inst:
                    if inst.errno != errno.ENOENT:
                        raise
        except (IOError, OSError, error.Abort) as inst:
            if not c:
                raise

    backuppath = "%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for f in backupfiles:
            if opener.exists(f):
                opener.unlink(f)
    except (IOError, OSError, error.Abort) as inst:
        # only the pure backup file remains; it is safe to ignore any error
        pass

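This hunk is the heart of the change: truncation during journal playback now avoids stat ambiguity only for files the caller names, instead of unconditionally. A hedged sketch of what a caller might pass; the set mirrors the (path, vfs-location) tuples described in the constructor docstring below, with '' denoting a store-relative path, and the concrete entries are illustrative:

checkambigfiles = {('00changelog.i', ''), ('phaseroots', '')}
_playback(journal, ui.warn, opener, vfsmap, entries, backupentries,
          unlink=True, checkambigfiles=checkambigfiles)
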
class transaction(object):
    def __init__(self, report, opener, vfsmap, journalname, undoname=None,
-                after=None, createmode=None, validator=None, releasefn=None):
+                after=None, createmode=None, validator=None, releasefn=None,
+                checkambigfiles=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the
        event of an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)
+
+       `checkambigfiles` is a set of (path, vfs-location) tuples,
+       which determine whether file stat ambiguity should be avoided
+       for the corresponding files.
        """
        self.count = 1
        self.usages = 1
        self.report = report
        # a vfs to the store content
        self.opener = opener
        # a map to access files in various locations {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener # set default value
        self._vfsmap = vfsmap
        self.after = after
        self.entries = []
        self.map = {}
        self.journal = journalname
        self.undoname = undoname
        self._queue = []
        # A callback to validate transaction content before closing it.
        # It should raise an exception if anything is wrong.
        # The target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self.validator = validator
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self.releasefn = releasefn

+       self.checkambigfiles = set()
+       if checkambigfiles:
+           self.checkambigfiles.update(checkambigfiles)
+
        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self.file = opener.open(self.journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % self.journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data has ever been written
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __del__(self):
        if self.journal:
            self._abort()

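For context on why this plumbing exists: '(size, mtime) ambiguity' means a file rewritten to the same size within the same second is indistinguishable by stat from its previous state, so cache validation that trusts stat can serve stale data; checkambig-aware writes defuse this by advancing the mtime when needed. A self-contained demonstration of the ambiguity itself (plain Python, not Mercurial code):

import os

with open('f', 'wb') as fp:
    fp.write(b'aa')
st1 = os.stat('f')
with open('f', 'wb') as fp:
    fp.write(b'bb')               # same size, likely the same second
st2 = os.stat('f')
ambiguous = (st1.st_size == st2.st_size
             and int(st1.st_mtime) == int(st2.st_mtime))
print('stat-ambiguous rewrite:', ambiguous)   # usually True
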
    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self.map or file in self._backupmap:
            return
        if self._queue:
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add an append-only entry to memory and on-disk state"""
        if file in self.map or file in self._backupmap:
            return
        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if file in self.map or file in self._backupmap:
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = "%s.backup.%s" % (self.journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            backupfile = ''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._addbackupentry((location, '', tmpfile, False))

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0,
                         location=''):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content for each entry in the `filenames` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entry in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such a file.

        The `genid` argument is used to ensure the same set of files is only
        generated once. A call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which
        multiple generators will be executed.

        The `location` argument may be used to indicate the files are located
        outside of the standard directory for transactions. It should match
        one of the keys of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

    @active
    def removefilegenerator(self, genid):
        """reverse of addfilegenerator, remove a file generator function"""
        if genid in self._filegenerators:
            del self._filegenerators[genid]

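A hedged usage sketch for addfilegenerator, assuming extension code running while a transaction is open (the generator id and file name are hypothetical):

def writestate(fp):
    # called with one open file object per name in the filenames tuple;
    # the transaction handles backup, creation and the final write
    fp.write('derived state\n')

tr = repo.currenttransaction()
if tr is not None:
    tr.addfilegenerator('myext', ('myext-state',), writestate)
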
    def _generatefiles(self, suffix='', group=gengroupall):
        # write files registered for generation
        any = False
        for id, entry in sorted(self._filegenerators.iteritems()):
            any = True
            order, filenames, genfunc, location = entry

            # for generation at closing, check if it's before or after finalize
            postfinalize = group == gengrouppostfinalize
            if (group != gengroupall and
                (id in postfinalizegenerators) != (postfinalize)):
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        self.registertmp(name, location=location)
                    else:
                        self.addbackup(name, location=location)
                    files.append(vfs(name, 'w', atomictemp=True,
                                     checkambig=not suffix))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()
        return any

    @active
    def find(self, file):
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            if exc_type is None:
                self.close()
        finally:
            self.release()

    def running(self):
        return self.count > 0

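The __enter__/__exit__ pair above means a transaction scope can be written as a with-block: close() runs only on a clean exit, while release() always runs and aborts a scope that was never closed. An illustrative use (the transaction name and backed-up file are arbitrary):

with repo.transaction('myext-update') as tr:
    tr.addbackup('phaseroots')   # restored if the block raises
    # ... write store files, register generators or callbacks ...
# a normal exit closed tr; an exception would have aborted it instead
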
    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix='.pending')
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks
        with newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def getpostclose(self, category):
        """return a postclose callback added before, or None"""
        return self._postclosecallback.get(category, None)

429 @active
439 @active
430 def addabort(self, category, callback):
440 def addabort(self, category, callback):
431 """add a callback to be called when the transaction is aborted.
441 """add a callback to be called when the transaction is aborted.
432
442
433 The transaction will be given as the first argument to the callback.
443 The transaction will be given as the first argument to the callback.
434
444
435 Category is a unique identifier to allow overwriting an old callback
445 Category is a unique identifier to allow overwriting an old callback
436 with a newer callback.
446 with a newer callback.
437 """
447 """
438 self._abortcallback[category] = callback
448 self._abortcallback[category] = callback
439
449
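    # Illustrative sketch, not part of this file: all of the add* methods
    # above key callbacks by category, so re-registering is an overwrite,
    # not an append. With a hypothetical category:
    #
    #     tr.addfinalize('flush-index', lambda tr: index.flush())
    #     tr.addfinalize('flush-index', lambda tr: index.flushall())
    #
    # only the second callback runs at close(), which keeps repeated setup
    # code from stacking duplicate work onto the transaction.
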
    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1:
            self.validator(self) # will raise exception if needed
            self.validator = None # Help prevent cycles.
            self._generatefiles(group=gengroupprefinalize)
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=gengrouppostfinalize)

        self.count -= 1
        if self.count != 0:
            return
        self.file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location %s\n"
                            % (b, l))
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self.entries = []
        self._writeundo()
        if self.after:
            self.after()
            self.after = None # Help prevent cycles.
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location"
                            " %s\n" % (b, l))
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self._backupentries = []
        self.journal = None

        self.releasefn(self, True) # notify success of closing transaction
        self.releasefn = None # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None

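    # Illustrative sketch, not part of this file: close() really commits
    # only for the outermost scope, because of the count bookkeeping at
    # the top of the method:
    #
    #     tr = repo.transaction('outer')    # count == 1
    #     tr2 = repo.transaction('inner')   # nested: same object, count == 2
    #     tr2.close()                       # count drops to 1, nothing flushed
    #     tr.close()                        # count hits 0, journal committed
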
    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self.undoname is None:
            return
        undobackupfile = self.opener.open("%s.backupfiles" % self.undoname, 'w')
        undobackupfile.write('%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f: # temporary file
                continue
            if not b:
                u = ''
            else:
                if l not in self._vfsmap and c:
                    self.report("couldn't remove %s: unknown cache location"
                                " %s\n" % (b, l))
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self.journal), name
                uname = name.replace(self.journal, self.undoname, 1)
                u = vfs.reljoin(base, uname)
            util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()


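    # Illustrative sketch, not part of this file: each record emitted by
    # _writeundo() is "location\0file\0backupfile\0cache-flag\n" after a
    # version header line, so a hypothetical reader could do:
    #
    #     lines = opener.open('undo.backupfiles').readlines()
    #     for line in lines[1:]:
    #         if line:
    #             l, f, u, c = line[:-1].split('\0')
    #
    # which mirrors how rollback() below parses '*.backupfiles' journals.
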
    def _abort(self):
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        try:
            if not self.entries and not self._backupentries:
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                if self.journal:
                    self.opener.unlink(self.journal)
                return

            self.report(_("transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(self.journal, self.report, self.opener, self._vfsmap,
                          self.entries, self._backupentries, False,
                          checkambigfiles=self.checkambigfiles)
                self.report(_("rollback completed\n"))
            except BaseException:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
            self.releasefn(self, False) # notify failure of transaction
            self.releasefn = None # Help prevent cycles.

def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples, which
    determines whether file stat ambiguity should be avoided when
    restoring the corresponding files.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        if lines:
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split('\0')
                        backupentries.append((l, f, b, bool(c)))
            else:
                report(_("journal was created by a different version of "
                         "Mercurial\n"))

    _playback(file, report, opener, vfsmap, entries, backupentries,
              checkambigfiles=checkambigfiles)
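
# Illustrative sketch, not part of this change: a recovery path would call
# rollback() roughly as follows, passing the set of paths to guard against
# stat ambiguity (the variable names here are assumptions, not this
# repository's API):
#
#     vfsmap = {'': repo.svfs, 'plain': repo.vfs}
#     rollback(repo.svfs, vfsmap, "journal", repo.ui.warn,
#              checkambigfiles=cachedfiles)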
@@ -1,470 +1,471 b''
Test that qpush cleans things up if it doesn't complete

  $ echo "[extensions]" >> $HGRCPATH
  $ echo "mq=" >> $HGRCPATH
  $ hg init repo
  $ cd repo
  $ echo foo > foo
  $ hg ci -Am 'add foo'
  adding foo
  $ touch untracked-file
  $ echo 'syntax: glob' > .hgignore
  $ echo '.hgignore' >> .hgignore
  $ hg qinit

test qpush on empty series

  $ hg qpush
  no patches in series
  $ hg qnew patch1
  $ echo >> foo
  $ hg qrefresh -m 'patch 1'
  $ hg qnew patch2
  $ echo bar > bar
  $ hg add bar
  $ hg qrefresh -m 'patch 2'
  $ hg qnew --config 'mq.plain=true' -U bad-patch
  $ echo >> foo
  $ hg qrefresh
  $ hg qpop -a
  popping bad-patch
  popping patch2
  popping patch1
  patch queue now empty
  $ $PYTHON -c 'print "\xe9"' > message
  $ cat .hg/patches/bad-patch >> message
  $ mv message .hg/patches/bad-patch
  $ cat > $TESTTMP/wrapplayback.py <<EOF
  > import os
  > from mercurial import extensions, transaction
  > def wrapplayback(orig,
  >                  journal, report, opener, vfsmap, entries, backupentries,
  >                  unlink=True, checkambigfiles=None):
  >     orig(journal, report, opener, vfsmap, entries, backupentries, unlink,
  >          checkambigfiles)
  >     # Touching files truncated at "transaction.abort" causes
  >     # forcible re-loading of invalidated filecache properties
  >     # (including repo.changelog)
  >     for f, o, _ignore in entries:
  >         if o or not unlink:
  >             os.utime(opener.join(f), (0.0, 0.0))
  > def extsetup(ui):
  >     extensions.wrapfunction(transaction, '_playback', wrapplayback)
  > EOF
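(The wrapper above must accept and forward the new checkambigfiles keyword,
since transaction._playback grew that argument in this change; without it
the wrapped call would no longer match the real signature.)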
  $ hg qpush -a --config extensions.wrapplayback=$TESTTMP/wrapplayback.py && echo 'qpush succeeded?!'
  applying patch1
  applying patch2
  applying bad-patch
  transaction abort!
  rollback completed
  cleaning up working directory...
  reverting foo
  done
  abort: decoding near '\xe9': 'ascii' codec can't decode byte 0xe9 in position 0: ordinal not in range(128)! (esc)
  [255]
  $ hg parents
  changeset:   0:bbd179dfa0a7
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     add foo


test corrupt status file
  $ hg qpush
  applying patch1
  now at: patch1
  $ cp .hg/patches/status .hg/patches/status.orig
  $ hg qpop
  popping patch1
  patch queue now empty
  $ cp .hg/patches/status.orig .hg/patches/status
  $ hg qpush
  abort: working directory revision is not qtip
  [255]
  $ rm .hg/patches/status .hg/patches/status.orig


bar should be gone; other unknown/ignored files should still be around

  $ hg status -A
  ? untracked-file
  I .hgignore
  C foo

preparing qpush of a missing patch

  $ hg qpop -a
  no patches applied
  $ hg qpush
  applying patch1
  now at: patch1
  $ rm .hg/patches/patch2

now we expect the push to fail, but it should NOT complain about patch1

  $ hg qpush
  applying patch2
  unable to read patch2
  now at: patch1
  [1]

preparing qpush of missing patch with no patch applied

  $ hg qpop -a
  popping patch1
  patch queue now empty
  $ rm .hg/patches/patch1

qpush should fail the same way as below

  $ hg qpush
  applying patch1
  unable to read patch1
  [1]

Test qpush to a patch below the currently applied patch.

  $ hg qq -c guardedseriesorder
  $ hg qnew a
  $ hg qguard +block
  $ hg qnew b
  $ hg qnew c

  $ hg qpop -a
  popping c
  popping b
  popping a
  patch queue now empty

try to push and pop while a is guarded

  $ hg qpush a
  cannot push 'a' - guarded by '+block'
  [1]
  $ hg qpush -a
  applying b
  patch b is empty
  applying c
  patch c is empty
  now at: c

now try it when a is unguarded, and we're at the top of the queue

  $ hg qapplied -v
  0 G a
  1 A b
  2 A c
  $ hg qsel block
  $ hg qpush b
  abort: cannot push to a previous patch: b
  [255]
  $ hg qpush a
  abort: cannot push to a previous patch: a
  [255]

and now we try it one more time with a unguarded, while we're not at the top of the queue

  $ hg qpop b
  popping c
  now at: b
  $ hg qpush a
  abort: cannot push to a previous patch: a
  [255]

test qpop --force and backup files

  $ hg qpop -a
  popping b
  patch queue now empty
  $ hg qq --create force
  $ echo a > a
  $ echo b > b
  $ echo c > c
  $ hg ci -Am add a b c
  $ echo a >> a
  $ hg rm b
  $ hg rm c
  $ hg qnew p1
  $ echo a >> a
  $ echo bb > b
  $ hg add b
  $ echo cc > c
  $ hg add c
  $ hg qpop --force --verbose
  saving current version of a as a.orig
  saving current version of b as b.orig
  saving current version of c as c.orig
  popping p1
  patch queue now empty
  $ hg st
  ? a.orig
  ? b.orig
  ? c.orig
  ? untracked-file
  $ cat a.orig
  a
  a
  a
  $ cat b.orig
  bb
  $ cat c.orig
  cc

test qpop --force --no-backup

  $ hg qpush
  applying p1
  now at: p1
  $ rm a.orig
  $ echo a >> a
  $ hg qpop --force --no-backup --verbose
  popping p1
  patch queue now empty
  $ test -f a.orig && echo 'error: backup with --no-backup'
  [1]

test qpop --keep-changes

  $ hg qpush
  applying p1
  now at: p1
  $ hg qpop --keep-changes --force
  abort: cannot use both --force and --keep-changes
  [255]
  $ echo a >> a
  $ hg qpop --keep-changes
  abort: local changes found, qrefresh first
  [255]
  $ hg revert -qa a
  $ rm a
  $ hg qpop --keep-changes
  abort: local changes found, qrefresh first
  [255]
  $ hg rm -A a
  $ hg qpop --keep-changes
  abort: local changes found, qrefresh first
  [255]
  $ hg revert -qa a
  $ echo b > b
  $ hg add b
  $ hg qpop --keep-changes
  abort: local changes found, qrefresh first
  [255]
  $ hg forget b
  $ echo d > d
  $ hg add d
  $ hg qpop --keep-changes
  popping p1
  patch queue now empty
  $ hg forget d
  $ rm d

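Note that qpop --keep-changes succeeds only once the local changes cannot
conflict with the patch being popped: the added file d above does not
overlap p1, so the final pop goes through.
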
test qpush --force and backup files

  $ echo a >> a
  $ hg qnew p2
  $ echo b >> b
  $ echo d > d
  $ echo e > e
  $ hg add d e
  $ hg rm c
  $ hg qnew p3
  $ hg qpop -a
  popping p3
  popping p2
  patch queue now empty
  $ echo a >> a
  $ echo b1 >> b
  $ echo d1 > d
  $ hg add d
  $ echo e1 > e
  $ hg qpush -a --force --verbose
  applying p2
  saving current version of a as a.orig
  patching file a
  committing files:
  a
  committing manifest
  committing changelog
  applying p3
  saving current version of b as b.orig
  saving current version of d as d.orig
  patching file b
  patching file c
  patching file d
  file d already exists
  1 out of 1 hunks FAILED -- saving rejects to file d.rej
  patching file e
  file e already exists
  1 out of 1 hunks FAILED -- saving rejects to file e.rej
  patch failed to apply
  committing files:
  b
  committing manifest
  committing changelog
  patch failed, rejects left in working directory
  errors during apply, please fix and qrefresh p3
  [2]
  $ cat a.orig
  a
  a
  $ cat b.orig
  b
  b1
  $ cat d.orig
  d1

test qpush --force --no-backup

  $ hg revert -qa
  $ hg qpop -a
  popping p3
  popping p2
  patch queue now empty
  $ echo a >> a
  $ rm a.orig
  $ hg qpush --force --no-backup --verbose
  applying p2
  patching file a
  committing files:
  a
  committing manifest
  committing changelog
  now at: p2
  $ test -f a.orig && echo 'error: backup with --no-backup'
  [1]

test qgoto --force --no-backup

  $ hg qpop
  popping p2
  patch queue now empty
  $ echo a >> a
  $ hg qgoto --force --no-backup p2 --verbose
  applying p2
  patching file a
  committing files:
  a
  committing manifest
  committing changelog
  now at: p2
  $ test -f a.orig && echo 'error: backup with --no-backup'
  [1]

test qpush --keep-changes

  $ hg qpush --keep-changes --force
  abort: cannot use both --force and --keep-changes
  [255]
  $ hg qpush --keep-changes --exact
  abort: cannot use --exact and --keep-changes together
  [255]
  $ echo b >> b
  $ hg qpush --keep-changes
  applying p3
  abort: conflicting local changes found
  (did you forget to qrefresh?)
  [255]
  $ rm b
  $ hg qpush --keep-changes
  applying p3
  abort: conflicting local changes found
  (did you forget to qrefresh?)
  [255]
  $ hg rm -A b
  $ hg qpush --keep-changes
  applying p3
  abort: conflicting local changes found
  (did you forget to qrefresh?)
  [255]
  $ hg revert -aq b
  $ echo d > d
  $ hg add d
  $ hg qpush --keep-changes
  applying p3
  abort: conflicting local changes found
  (did you forget to qrefresh?)
  [255]
  $ hg forget d
  $ rm d
  $ hg qpop
  popping p2
  patch queue now empty
  $ echo b >> b
  $ hg qpush -a --keep-changes
  applying p2
  applying p3
  abort: conflicting local changes found
  (did you forget to qrefresh?)
  [255]
  $ hg qtop
  p2
  $ hg parents --template "{rev} {desc}\n"
  2 imported patch p2
  $ hg st b
  M b
  $ cat b
  b
  b

test qgoto --keep-changes

  $ hg revert -aq b
  $ rm e
  $ hg qgoto --keep-changes --force p3
  abort: cannot use both --force and --keep-changes
  [255]
  $ echo a >> a
  $ hg qgoto --keep-changes p3
  applying p3
  now at: p3
  $ hg st a
  M a
  $ hg qgoto --keep-changes p2
  popping p3
  now at: p2
  $ hg st a
  M a

test mq.keepchanges setting

  $ hg --config mq.keepchanges=1 qpush
  applying p3
  now at: p3
  $ hg st a
  M a
  $ hg --config mq.keepchanges=1 qpop
  popping p3
  now at: p2
  $ hg st a
  M a
  $ hg --config mq.keepchanges=1 qgoto p3
  applying p3
  now at: p3
  $ hg st a
  M a
  $ echo b >> b
  $ hg --config mq.keepchanges=1 qpop --force --config 'ui.origbackuppath=.hg/origbackups'
  popping p3
  now at: p2
  $ hg st b
  $ hg --config mq.keepchanges=1 qpush --exact
  abort: local changes found, qrefresh first
  [255]
  $ hg revert -qa a
  $ hg qpop
  popping p2
  patch queue now empty
  $ echo a >> a
  $ hg --config mq.keepchanges=1 qpush --force
  applying p2
  now at: p2
  $ hg st a

test previous qpop (with --force and --config) saved .orig files to where user
wants them
  $ ls .hg/origbackups
  b.orig
  $ rm -rf .hg/origbackups

  $ cd ..