localrepo: add isfilecached to check filecache-ed property is already cached...
FUJIWARA Katsunori
r33382:b107a766 default
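
The new helper reports whether a filecache-ed property (for example 'changelog' or 'dirstate') is already loaded, without triggering the load the way plain attribute access would. A minimal sketch of the intended call pattern (hypothetical caller code, not part of this change):

    from mercurial import localrepo

    # Peek at the cache; returns (cached-object-or-None, iscached).
    obj, iscached = localrepo.isfilecached(repo, 'changelog')
    if iscached:
        # already loaded, so this cannot trigger an expensive read
        tiprev = len(obj) - 1
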
@@ -1,2143 +1,2153 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import inspect
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    pycompat,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepo,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

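# Illustrative declaration (not part of this change; it mirrors real uses
# further down this file, e.g. _bookmarks): a property declared through
# these classes is recomputed only when its backing file changes on disk.
#
#   @repofilecache('bookmarks', 'bookmarks.current')
#   def _bookmarks(self):
#       return bookmarks.bmstore(self)
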
+def isfilecached(repo, name):
+    """check if a repo has already cached "name" filecache-ed property
+
+    This returns (cachedobj-or-None, iscached) tuple.
+    """
+    cacheentry = repo.unfiltered()._filecache.get(name, None)
+    if not cacheentry:
+        return None, False
+    return cacheentry.obj, True
+
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate method that always needs to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        if caps is None:
            caps = moderncaps.copy()
        peer.peerrepository.__init__(self)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

class localrepository(object):

    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
        REVLOGV2_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        'manifestv2',
    }

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # These auditors are not used by the vfs,
        # only used when writing this comment: basectx.match
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = vfsmod.vfs(self.path)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, vfsmod.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan', -1)
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class filteredrepo(repoview.repoview, self.unfiltered().__class__):
            pass
        return filteredrepo(self, name)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

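    # Illustrative calls (assumed usage): %-formatting escapes values
    # safely, e.g. %d for an integer revision and %s for a string:
    #
    #   revs = repo.revs('heads(%d::)', headrev)
    #   revs = repo.revs('branch(%s) and not public()', branchname)
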
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self,
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

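    # Illustrative call (assumed usage): expand user aliases while
    # overriding one of them with a local definition:
    #
    #   revs = repo.anyrevs(['releases() and not public()'], user=True,
    #                       localalias={'releases': 'tag("re:^v")'})
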
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

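    # Illustrative call (assumed usage): fire a custom hook, raising on
    # failure; keyword arguments become HG_* environment variables for
    # shell hooks:
    #
    #   repo.hook('myext-postsync', throw=True, source='myext')
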
681 @filteredpropertycache
691 @filteredpropertycache
682 def _tagscache(self):
692 def _tagscache(self):
683 '''Returns a tagscache object that contains various tags related
693 '''Returns a tagscache object that contains various tags related
684 caches.'''
694 caches.'''
685
695
686 # This simplifies its cache management by having one decorated
696 # This simplifies its cache management by having one decorated
687 # function (this one) and the rest simply fetch things from it.
697 # function (this one) and the rest simply fetch things from it.
688 class tagscache(object):
698 class tagscache(object):
689 def __init__(self):
699 def __init__(self):
690 # These two define the set of tags for this repository. tags
700 # These two define the set of tags for this repository. tags
691 # maps tag name to node; tagtypes maps tag name to 'global' or
701 # maps tag name to node; tagtypes maps tag name to 'global' or
692 # 'local'. (Global tags are defined by .hgtags across all
702 # 'local'. (Global tags are defined by .hgtags across all
693 # heads, and local tags are defined in .hg/localtags.)
703 # heads, and local tags are defined in .hg/localtags.)
694 # They constitute the in-memory cache of tags.
704 # They constitute the in-memory cache of tags.
695 self.tags = self.tagtypes = None
705 self.tags = self.tagtypes = None
696
706
697 self.nodetagscache = self.tagslist = None
707 self.nodetagscache = self.tagslist = None
698
708
699 cache = tagscache()
709 cache = tagscache()
700 cache.tags, cache.tagtypes = self._findtags()
710 cache.tags, cache.tagtypes = self._findtags()
701
711
702 return cache
712 return cache
703
713
704 def tags(self):
714 def tags(self):
705 '''return a mapping of tag to node'''
715 '''return a mapping of tag to node'''
706 t = {}
716 t = {}
707 if self.changelog.filteredrevs:
717 if self.changelog.filteredrevs:
708 tags, tt = self._findtags()
718 tags, tt = self._findtags()
709 else:
719 else:
710 tags = self._tagscache.tags
720 tags = self._tagscache.tags
711 for k, v in tags.iteritems():
721 for k, v in tags.iteritems():
712 try:
722 try:
713 # ignore tags to unknown nodes
723 # ignore tags to unknown nodes
714 self.changelog.rev(v)
724 self.changelog.rev(v)
715 t[k] = v
725 t[k] = v
716 except (error.LookupError, ValueError):
726 except (error.LookupError, ValueError):
717 pass
727 pass
718 return t
728 return t
719
729
720 def _findtags(self):
730 def _findtags(self):
721 '''Do the hard work of finding tags. Return a pair of dicts
731 '''Do the hard work of finding tags. Return a pair of dicts
722 (tags, tagtypes) where tags maps tag name to node, and tagtypes
732 (tags, tagtypes) where tags maps tag name to node, and tagtypes
723 maps tag name to a string like \'global\' or \'local\'.
733 maps tag name to a string like \'global\' or \'local\'.
724 Subclasses or extensions are free to add their own tags, but
734 Subclasses or extensions are free to add their own tags, but
725 should be aware that the returned dicts will be retained for the
735 should be aware that the returned dicts will be retained for the
726 duration of the localrepo object.'''
736 duration of the localrepo object.'''
727
737
728 # XXX what tagtype should subclasses/extensions use? Currently
738 # XXX what tagtype should subclasses/extensions use? Currently
729 # mq and bookmarks add tags, but do not set the tagtype at all.
739 # mq and bookmarks add tags, but do not set the tagtype at all.
730 # Should each extension invent its own tag type? Should there
740 # Should each extension invent its own tag type? Should there
731 # be one tagtype for all such "virtual" tags? Or is the status
741 # be one tagtype for all such "virtual" tags? Or is the status
732 # quo fine?
742 # quo fine?
733
743
734
744
735 # map tag name to (node, hist)
745 # map tag name to (node, hist)
736 alltags = tagsmod.findglobaltags(self.ui, self)
746 alltags = tagsmod.findglobaltags(self.ui, self)
737 # map tag name to tag type
747 # map tag name to tag type
738 tagtypes = dict((tag, 'global') for tag in alltags)
748 tagtypes = dict((tag, 'global') for tag in alltags)
739
749
740 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
750 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
741
751
742 # Build the return dicts. Have to re-encode tag names because
752 # Build the return dicts. Have to re-encode tag names because
743 # the tags module always uses UTF-8 (in order not to lose info
753 # the tags module always uses UTF-8 (in order not to lose info
744 # writing to the cache), but the rest of Mercurial wants them in
754 # writing to the cache), but the rest of Mercurial wants them in
745 # local encoding.
755 # local encoding.
746 tags = {}
756 tags = {}
747 for (name, (node, hist)) in alltags.iteritems():
757 for (name, (node, hist)) in alltags.iteritems():
748 if node != nullid:
758 if node != nullid:
749 tags[encoding.tolocal(name)] = node
759 tags[encoding.tolocal(name)] = node
750 tags['tip'] = self.changelog.tip()
760 tags['tip'] = self.changelog.tip()
751 tagtypes = dict([(encoding.tolocal(name), value)
761 tagtypes = dict([(encoding.tolocal(name), value)
752 for (name, value) in tagtypes.iteritems()])
762 for (name, value) in tagtypes.iteritems()])
753 return (tags, tagtypes)
763 return (tags, tagtypes)
754
764
755 def tagtype(self, tagname):
765 def tagtype(self, tagname):
756 '''
766 '''
757 return the type of the given tag. result can be:
767 return the type of the given tag. result can be:
758
768
759 'local' : a local tag
769 'local' : a local tag
760 'global' : a global tag
770 'global' : a global tag
761 None : tag does not exist
771 None : tag does not exist
762 '''
772 '''
763
773
764 return self._tagscache.tagtypes.get(tagname)
774 return self._tagscache.tagtypes.get(tagname)
765
775
766 def tagslist(self):
776 def tagslist(self):
767 '''return a list of tags ordered by revision'''
777 '''return a list of tags ordered by revision'''
768 if not self._tagscache.tagslist:
778 if not self._tagscache.tagslist:
769 l = []
779 l = []
770 for t, n in self.tags().iteritems():
780 for t, n in self.tags().iteritems():
771 l.append((self.changelog.rev(n), t, n))
781 l.append((self.changelog.rev(n), t, n))
772 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
782 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
773
783
774 return self._tagscache.tagslist
784 return self._tagscache.tagslist
775
785
776 def nodetags(self, node):
786 def nodetags(self, node):
777 '''return the tags associated with a node'''
787 '''return the tags associated with a node'''
778 if not self._tagscache.nodetagscache:
788 if not self._tagscache.nodetagscache:
779 nodetagscache = {}
789 nodetagscache = {}
780 for t, n in self._tagscache.tags.iteritems():
790 for t, n in self._tagscache.tags.iteritems():
781 nodetagscache.setdefault(n, []).append(t)
791 nodetagscache.setdefault(n, []).append(t)
782 for tags in nodetagscache.itervalues():
792 for tags in nodetagscache.itervalues():
783 tags.sort()
793 tags.sort()
784 self._tagscache.nodetagscache = nodetagscache
794 self._tagscache.nodetagscache = nodetagscache
785 return self._tagscache.nodetagscache.get(node, [])
795 return self._tagscache.nodetagscache.get(node, [])
786
796
787 def nodebookmarks(self, node):
797 def nodebookmarks(self, node):
788 """return the list of bookmarks pointing to the specified node"""
798 """return the list of bookmarks pointing to the specified node"""
789 marks = []
799 marks = []
790 for bookmark, n in self._bookmarks.iteritems():
800 for bookmark, n in self._bookmarks.iteritems():
791 if n == node:
801 if n == node:
792 marks.append(bookmark)
802 marks.append(bookmark)
793 return sorted(marks)
803 return sorted(marks)
794
804
795 def branchmap(self):
805 def branchmap(self):
796 '''returns a dictionary {branch: [branchheads]} with branchheads
806 '''returns a dictionary {branch: [branchheads]} with branchheads
797 ordered by increasing revision number'''
807 ordered by increasing revision number'''
798 branchmap.updatecache(self)
808 branchmap.updatecache(self)
799 return self._branchcaches[self.filtername]
809 return self._branchcaches[self.filtername]
800
810
801 @unfilteredmethod
811 @unfilteredmethod
802 def revbranchcache(self):
812 def revbranchcache(self):
803 if not self._revbranchcache:
813 if not self._revbranchcache:
804 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
814 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
805 return self._revbranchcache
815 return self._revbranchcache
806
816
807 def branchtip(self, branch, ignoremissing=False):
817 def branchtip(self, branch, ignoremissing=False):
808 '''return the tip node for a given branch
818 '''return the tip node for a given branch
809
819
810 If ignoremissing is True, then this method will not raise an error.
820 If ignoremissing is True, then this method will not raise an error.
811 This is helpful for callers that only expect None for a missing branch
821 This is helpful for callers that only expect None for a missing branch
812 (e.g. namespace).
822 (e.g. namespace).
813
823
814 '''
824 '''
815 try:
825 try:
816 return self.branchmap().branchtip(branch)
826 return self.branchmap().branchtip(branch)
817 except KeyError:
827 except KeyError:
818 if not ignoremissing:
828 if not ignoremissing:
819 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
829 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
820 else:
830 else:
821 pass
831 pass
822
832
823 def lookup(self, key):
833 def lookup(self, key):
824 return self[key].node()
834 return self[key].node()
825
835
826 def lookupbranch(self, key, remote=None):
836 def lookupbranch(self, key, remote=None):
827 repo = remote or self
837 repo = remote or self
828 if key in repo.branchmap():
838 if key in repo.branchmap():
829 return key
839 return key
830
840
831 repo = (remote and remote.local()) and remote or self
841 repo = (remote and remote.local()) and remote or self
832 return repo[key].branch()
842 return repo[key].branch()
833
843
834 def known(self, nodes):
844 def known(self, nodes):
835 cl = self.changelog
845 cl = self.changelog
836 nm = cl.nodemap
846 nm = cl.nodemap
837 filtered = cl.filteredrevs
847 filtered = cl.filteredrevs
838 result = []
848 result = []
839 for n in nodes:
849 for n in nodes:
840 r = nm.get(n)
850 r = nm.get(n)
841 resp = not (r is None or r in filtered)
851 resp = not (r is None or r in filtered)
842 result.append(resp)
852 result.append(resp)
843 return result
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it itself, as
                # it requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

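    # Illustrative note (a sketch, not part of the original module): the
    # sections read by _loadfilter() above come from the user's hgrc. A
    # hypothetical configuration piping text files through external commands
    # (here 'dos2unix'/'unix2dos', assumed to exist on PATH) could look
    # like::
    #
    #   [encode]
    #   *.txt = dos2unix
    #
    #   [decode]
    #   *.txt = unix2dos
    #
    # Patterns are matched via matchmod.match(); a plain command value is
    # run as a pipe through util.filter().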
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
        return len(data)

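    # For illustration (a sketch, not part of the original module): the
    # ``flags`` argument mirrors manifest flags, so::
    #
    #   repo.wwrite('script.sh', data, 'x')   # regular file, executable bit
    #   repo.wwrite('alias', 'target', 'l')   # symlink pointing at 'target'
    #   repo.wwrite('plain.txt', data, '')    # regular, non-executable file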
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup
        # application, but that fails to cope with cases where a transaction
        # exposes new heads without a changegroup being involved (eg: phase
        # movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
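        # An illustrative parsing sketch for that file (an assumption-laden
        # example, not part of the original module)::
        #
        #   def parsetagchanges(data):
        #       # yield (action, hex-node, tag-name) tuples from the
        #       # tags.changes content described above
        #       for line in data.splitlines():
        #           action, hexnode, tagname = line.split(' ', 2)
        #           yield action, hexnode, tagname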
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags',
                                             False)
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here. As we do it only once,
                # building sets would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist
                        # when the transaction is closed (for txnclose
                        # hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out at transaction close if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles)
        tr.changes['revs'] = set()
        tr.changes['obsmarkers'] = set()

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        tr.addpostclose('warms-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr

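    # Illustrative usage sketch (an assumption-laden example, not part of
    # the original module): a typical write path pairs the store lock with
    # a transaction::
    #
    #   with repo.lock():
    #       tr = repo.transaction('my-operation')
    #       try:
    #           ...                 # mutate the store
    #           tr.close()          # commit the transaction
    #       finally:
    #           tr.release()        # abort if close() was never reached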
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

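    # For illustration (not part of the original module): after
    # _writejournal('commit') on a repository containing 42 revisions,
    # .hg/journal.desc would hold two lines, the old changelog length and
    # the transaction description::
    #
    #   42
    #   commit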
    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others.
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l

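    # Illustrative note (not part of the original module): the timeout used
    # above is the standard 'ui.timeout' setting, configurable in hgrc::
    #
    #   [ui]
    #   timeout = 600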
    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

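    # Illustrative usage sketch (an assumption-laden example, not part of
    # the original module): the acquisition order documented above, with
    # the locks used as context managers::
    #
    #   with repo.wlock():        # non-store lock first ...
    #       with repo.lock():     # ... then the store lock
    #           # safe to touch both the working copy and the store here
    #           pass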
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
1753 except: # re-raises
1744 if edited:
1754 if edited:
1745 self.ui.write(
1755 self.ui.write(
1746 _('note: commit message saved in %s\n') % msgfn)
1756 _('note: commit message saved in %s\n') % msgfn)
1747 raise
1757 raise
1748 # update bookmarks, dirstate and mergestate
1758 # update bookmarks, dirstate and mergestate
1749 bookmarks.update(self, [p1, p2], ret)
1759 bookmarks.update(self, [p1, p2], ret)
1750 cctx.markcommitted(ret)
1760 cctx.markcommitted(ret)
1751 ms.reset()
1761 ms.reset()
1752 tr.close()
1762 tr.close()
1753
1763
1754 finally:
1764 finally:
1755 lockmod.release(tr, lock, wlock)
1765 lockmod.release(tr, lock, wlock)
1756
1766
1757 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1767 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1758 # hack for command that use a temporary commit (eg: histedit)
1768 # hack for command that use a temporary commit (eg: histedit)
1759 # temporary commit got stripped before hook release
1769 # temporary commit got stripped before hook release
1760 if self.changelog.hasnode(ret):
1770 if self.changelog.hasnode(ret):
1761 self.hook("commit", node=node, parent1=parent1,
1771 self.hook("commit", node=node, parent1=parent1,
1762 parent2=parent2)
1772 parent2=parent2)
1763 self._afterlock(commithook)
1773 self._afterlock(commithook)
1764 return ret
1774 return ret
1765
1775
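    # Illustrative sketch (not part of the original file; repo path and
    # message are hypothetical): how a caller might drive the commit flow
    # above. ui.allowemptycommit mirrors the internal config checked in
    # commit().
    #
    #     from mercurial import hg, ui as uimod
    #     u = uimod.ui.load()
    #     u.setconfig('ui', 'allowemptycommit', True)
    #     repo = hg.repository(u, '/path/to/repo')
    #     node = repo.commit(text='empty checkpoint', user='someone')
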
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter the parent
                # changesets: if a parent has a higher phase, the
                # resulting phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

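    # Illustrative sketch (an assumption, not from the original file):
    # commitctx() is also reachable without a working directory via
    # context.memctx, e.g. from code that synthesizes commits in memory.
    # p1node and all file contents below are hypothetical.
    #
    #     from mercurial import context
    #     def getfilectx(repo, memctx, path):
    #         return context.memfilectx(repo, path, 'contents\n')
    #     mctx = context.memctx(repo, (p1node, None), 'message',
    #                           ['a.txt'], getfilectx, user='someone')
    #     node = repo.commitctx(mctx)
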
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to either stay in memory (waiting for the next unlock) or
        vanish completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        self.ui.deprecwarn('use repo[node].walk instead of repo.walk', '4.3')
        return self[node].walk(match)

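    # Migration sketch: per the deprecation warning above, callers should
    # walk a context instead of the repo. The '*.py' pattern is just an
    # illustration.
    #
    #     from mercurial import match as matchmod
    #     m = matchmod.match(repo.root, '', ['glob:*.py'])
    #     for f in repo['.'].walk(m):
    #         repo.ui.write(f + '\n')
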
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which
        status fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

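    # Hypothetical extension sketch: registering a post-dirstate-status
    # callback. Per the docstring above, the callback reaches the dirstate
    # through wctx.repo() rather than a stale captured copy, and has to be
    # re-registered before each status run.
    #
    #     def fixup(wctx, status):
    #         wctx.repo().ui.debug('%d modified after status\n'
    #                              % len(status.modified))
    #     repo.addpostdsstatus(fixup)
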
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

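    # Example (sketch): heads() returns nodes in descending revision order,
    # so the first element is the highest-revision head. hex() is the
    # module-level import from mercurial.node.
    #
    #     hs = repo.heads()
    #     repo.ui.write('%d heads, newest is %s\n' % (len(hs), hex(hs[0])))
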
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

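    # Usage sketch: topological heads of the 'default' branch, including
    # closed ones; an empty list means the branch is absent from the
    # branch map.
    #
    #     for h in repo.branchheads('default', closed=True):
    #         repo.ui.write(hex(h) + '\n')
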
    def branches(self, nodes):
        # for each node, follow first parents until a merge or a root is
        # reached, recording (starting node, node where the walk stopped,
        # first parent, second parent)
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        # for each (top, bottom) pair, walk the first-parent chain down
        # from top and sample the nodes at exponentially growing distances
        # (1, 2, 4, 8, ... steps below top)
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

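    # Worked example (assuming a linear history between the two nodes):
    # with i counting steps below top and f doubling after each hit, the
    # loop records the nodes 1, 2, 4, 8, ... steps down the first-parent
    # chain. For a (top, bottom) pair 17 commits apart, between() would
    # return the nodes at distances 1, 2, 4, 8 and 16, which lets a
    # discovery client bisect the gap without fetching every node.
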
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of functions that are called with a
        pushop (carrying repo, remote and outgoing attributes) before
        changesets are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs['namespace'] = namespace
            hookargs['key'] = key
            hookargs['old'] = old
            hookargs['new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

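    # Usage sketch: well-known pushkey namespaces include 'bookmarks' and
    # 'phases'. Moving a bookmark via pushkey, where oldhex/newhex are
    # hypothetical hex node strings:
    #
    #     ok = repo.pushkey('bookmarks', 'mybook', oldhex, newhex)
    #     if not ok:
    #         repo.ui.warn('bookmark update was refused\n')
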
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

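    # Note (sketch): the message is written to .hg/last-message.txt and the
    # return value is a user-displayable path to that file, as used by the
    # "commit message saved in ..." note in commit() above.
    #
    #     msgpath = repo.savecommitmessage('WIP: draft message\n')
    #     repo.ui.status('message saved in %s\n' % msgpath)
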
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename cannot be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

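# Example (sketch): undoname() maps a journal file to its undo counterpart,
# replacing only the first occurrence of 'journal' in the basename:
#
#     undoname('.hg/store/journal')            -> '.hg/store/undo'
#     undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'
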
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression', 'zlib')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
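
# Extension sketch (hypothetical names): since newreporequirements() is
# explicitly wrappable, an extension can inject its own requirement for
# freshly created repositories. A real extension must also teach the repo
# class to open repositories carrying the new requirement.
#
#     from mercurial import extensions, localrepo
#     def _newreporequirements(orig, repo):
#         reqs = orig(repo)
#         reqs.add('exp-myextension')
#         return reqs
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'newreporequirements',
#                                 _newreporequirements)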