commit: add debug message regarding manifest reuse
Yuya Nishihara
r39145:a915db9a default
@@ -1,2410 +1,2412 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

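# Editorial note (not in the original file): these two decorators are used
# below on localrepository properties, e.g.
# @repofilecache('bookmarks', 'bookmarks.current') for files in .hg/ and
# @storecache('00changelog.i') for files in .hg/store/; the only difference
# is whether join() resolves paths against vfs or svfs.
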
def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

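# Illustrative usage sketch (editorial, not part of the source): callers
# obtain this executor through the peer interface, roughly:
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand('heads', {})
#     heads = f.result()
#
# Because the peer is local, callcommand() returns an already-resolved
# future instead of batching requests for the wire.
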
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

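# Editorial note: localpeer operates on repo.filtered('served'), so commands
# issued through it never see secret or otherwise hidden changesets, matching
# what a remote client would observe over the wire protocol.
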
@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

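# Editorial note: in a sparse revlog, a delta chain may hop between bases
# that are far apart in the file; sparse reading fetches several small
# slices covering only the needed revisions rather than one contiguous span,
# which keeps peak memory bounded.
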
# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

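# Illustrative sketch (hypothetical extension code, editorial): an extension
# would typically advertise support for a custom requirement like this:
#
#     def featuresetup(ui, supported):
#         supported.add('exp-myfeature')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
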
@interfaceutil.implementer(repository.completelocalrepository)
class localrepository(object):

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, path, create=False, intents=None):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        self.auditor = pathutil.pathauditor(
            self.root, callback=self._checknested)
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        self.nofsauditor = pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                try:
                    self.vfs.stat()
                except OSError as inst:
                    if inst.errno != errno.ENOENT:
                        raise
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extension to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        deltabothparents = self.ui.configbool('storage',
                                              'revlog.optimize-delta-parent-choice')
        self.svfs.options['deltabothparents'] = deltabothparents
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
        sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
        self.svfs.options['sparse-revlog'] = sparserevlog
        if sparserevlog:
            self.svfs.options['generaldelta'] = True

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

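    # Editorial note: the svfs.options dict built above is the channel by
    # which requirements and config reach revlog construction; the changelog
    # and manifest revlogs consult these options (e.g. 'generaldelta',
    # 'sparse-revlog', 'maxchainlen') when they are instantiated over svfs.
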
    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

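    # Illustrative example (editorial): for a repository rooted at /repo with
    # a subrepository entry 'sub' in its substate, _checknested('/repo/sub')
    # returns True; a path outside self.root, or one matching no substate
    # prefix, returns False, and deeper nesting is delegated to
    # sub.checknested().
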
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

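    # Illustrative usage (editorial): repo.filtered('visible') hides hidden
    # (e.g. obsolete) changesets and repo.filtered('served') additionally
    # hides secret ones; each call builds a repoview class on top of the
    # unfiltered repository.
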
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

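    # Editorial note: manifestlog is cached via storecache on '00manifest.i',
    # so repeated accesses reuse the same instance until the manifest revlog
    # changes on disk; the commit in this diff concerns commit-time reuse of
    # an unchanged manifest, for which it adds a debug message.
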
    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        target = self
        if self.shared():
            from . import hg
            target = hg.sharedreposource(self)
        narrowspec.save(target, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

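    # Illustrative usage (editorial): repo[None] is the working directory
    # context, repo['.'] the working parent, repo[0] the first changeset,
    # and repo[0:5] a list of changectx objects with filtered revisions
    # skipped.
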
865 def __contains__(self, changeid):
865 def __contains__(self, changeid):
866 """True if the given changeid exists
866 """True if the given changeid exists
867
867
868 error.AmbiguousPrefixLookupError is raised if an ambiguous node
868 error.AmbiguousPrefixLookupError is raised if an ambiguous node
869 specified.
869 specified.
870 """
870 """
871 try:
871 try:
872 self[changeid]
872 self[changeid]
873 return True
873 return True
874 except error.RepoLookupError:
874 except error.RepoLookupError:
875 return False
875 return False
876
876
877 def __nonzero__(self):
877 def __nonzero__(self):
878 return True
878 return True
879
879
880 __bool__ = __nonzero__
880 __bool__ = __nonzero__
881
881
882 def __len__(self):
882 def __len__(self):
883 # no need to pay the cost of repoview.changelog
883 # no need to pay the cost of repoview.changelog
884 unfi = self.unfiltered()
884 unfi = self.unfiltered()
885 return len(unfi.changelog)
885 return len(unfi.changelog)
886
886
887 def __iter__(self):
887 def __iter__(self):
888 return iter(self.changelog)
888 return iter(self.changelog)
889
889
890 def revs(self, expr, *args):
890 def revs(self, expr, *args):
891 '''Find revisions matching a revset.
891 '''Find revisions matching a revset.
892
892
893 The revset is specified as a string ``expr`` that may contain
893 The revset is specified as a string ``expr`` that may contain
894 %-formatting to escape certain types. See ``revsetlang.formatspec``.
894 %-formatting to escape certain types. See ``revsetlang.formatspec``.
895
895
896 Revset aliases from the configuration are not expanded. To expand
896 Revset aliases from the configuration are not expanded. To expand
897 user aliases, consider calling ``scmutil.revrange()`` or
897 user aliases, consider calling ``scmutil.revrange()`` or
898 ``repo.anyrevs([expr], user=True)``.
898 ``repo.anyrevs([expr], user=True)``.
899
899
900 Returns a revset.abstractsmartset, which is a list-like interface
900 Returns a revset.abstractsmartset, which is a list-like interface
901 that contains integer revisions.
901 that contains integer revisions.
902 '''
902 '''
903 expr = revsetlang.formatspec(expr, *args)
903 expr = revsetlang.formatspec(expr, *args)
904 m = revset.match(None, expr)
904 m = revset.match(None, expr)
905 return m(self)
905 return m(self)
906
906
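    # Illustrative usage (a sketch, not in the original file): the
    # %-formatting escapes values safely, e.g. ``%d`` for an int, ``%s`` for
    # a string and ``%ld`` for a list of ints:
    #
    #   headrevs = repo.revs('heads(%ld)', subsetrevs)
    #   draft = repo.revs('draft() and branch(%s)', branchname)
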
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

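    # An illustrative sketch (not in the original file): ``localalias`` lets a
    # caller pin an alias down regardless of the user's configuration, e.g.:
    #
    #   revs = repo.anyrevs(['mine()'], user=True,
    #                       localalias={'mine': 'author(alice)'})
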
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

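    # A minimal sketch (not part of the original file): an extension that
    # registered a hypothetical 'myext-updated' hook could fire it with:
    #
    #   repo.hook('myext-updated', throw=False, count=len(repo))
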
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

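    # Illustrative usage (a sketch, not in the original file): the tag APIs
    # compose naturally, e.g. listing every tag with its node and type:
    #
    #   for name, node in repo.tags().iteritems():
    #       repo.ui.write('%s %s %s\n' % (name, hex(node),
    #                                     repo.tagtype(name)))
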
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

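    # A minimal sketch (not part of the original file): resolving a branch tip
    # without catching RepoLookupError for unknown branches:
    #
    #   node = repo.branchtip('default', ignoremissing=True)
    #   if node is not None:
    #       ctx = repo[node]
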
    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

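    # An illustrative configuration (not part of the original file; the hgrc
    # documentation is authoritative): ``filter`` names an hgrc section such
    # as [encode] or [decode], mapping file patterns to filter commands, e.g.:
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   *.gz = pipe: gzip
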
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

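    # Illustrative usage (a sketch, not in the original file): ``flags`` is a
    # string of flag characters, where 'l' writes a symlink and 'x' marks the
    # file executable; an empty string writes a plain file:
    #
    #   repo.wwrite('build.sh', script, 'x')
    #   repo.wwrite('notes.txt', text, '')
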
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        # ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        # <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        # "-R": tag is removed,
        # "+A": tag is added,
        # "-M": tag is moved (old value),
        # "+M": tag is moved (new value),
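        #
        # For example (an illustrative sketch, not in the original file),
        # moving a tag 'v1.2' would record two lines in tags.changes, the
        # old node with "-M" and the new node with "+M":
        #
        #   -M <old-hex-node> v1.2
        #   +M <new-hex-node> v1.2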
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out when the
                # transaction closes, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while the
                # transaction is running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['revs'] = pycompat.xrange(0, 0)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

1455
1455
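    # Typical usage (an illustrative sketch, not part of the original file):
    # callers take the appropriate lock, open the transaction, and release it
    # in a finally block so that an error triggers a rollback:
    #
    #   with repo.lock():
    #       tr = repo.transaction('my-operation')
    #       try:
    #           # ... write store data through tr ...
    #           tr.close()
    #       finally:
    #           tr.release()
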
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

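    # A hedged sketch (not part of the original file): an extension could
    # augment the cache updater by wrapping this method, e.g.:
    #
    #   def wrapper(orig, repo, newtransaction):
    #       updater = orig(repo, newtransaction)
    #       def extendedupdater(tr):
    #           updater(tr)
    #           # warm a hypothetical extension-specific cache here
    #       return extendedupdater
    #   extensions.wrapfunction(localrepo.localrepository,
    #                           '_buildcacheupdater', wrapper)
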
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

1659 def invalidate(self, clearfilecache=False):
1659 def invalidate(self, clearfilecache=False):
1660 '''Invalidates both store and non-store parts other than dirstate
1660 '''Invalidates both store and non-store parts other than dirstate
1661
1661
1662 If a transaction is running, invalidation of store is omitted,
1662 If a transaction is running, invalidation of store is omitted,
1663 because discarding in-memory changes might cause inconsistency
1663 because discarding in-memory changes might cause inconsistency
1664 (e.g. incomplete fncache causes unintentional failure, but
1664 (e.g. incomplete fncache causes unintentional failure, but
1665 redundant one doesn't).
1665 redundant one doesn't).
1666 '''
1666 '''
1667 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1667 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1668 for k in list(self._filecache.keys()):
1668 for k in list(self._filecache.keys()):
1669 # dirstate is invalidated separately in invalidatedirstate()
1669 # dirstate is invalidated separately in invalidatedirstate()
1670 if k == 'dirstate':
1670 if k == 'dirstate':
1671 continue
1671 continue
1672 if (k == 'changelog' and
1672 if (k == 'changelog' and
1673 self.currenttransaction() and
1673 self.currenttransaction() and
1674 self.changelog._delayed):
1674 self.changelog._delayed):
1675 # The changelog object may store unwritten revisions. We don't
1675 # The changelog object may store unwritten revisions. We don't
1676 # want to lose them.
1676 # want to lose them.
1677 # TODO: Solve the problem instead of working around it.
1677 # TODO: Solve the problem instead of working around it.
1678 continue
1678 continue
1679
1679
1680 if clearfilecache:
1680 if clearfilecache:
1681 del self._filecache[k]
1681 del self._filecache[k]
1682 try:
1682 try:
1683 delattr(unfiltered, k)
1683 delattr(unfiltered, k)
1684 except AttributeError:
1684 except AttributeError:
1685 pass
1685 pass
1686 self.invalidatecaches()
1686 self.invalidatecaches()
1687 if not self.currenttransaction():
1687 if not self.currenttransaction():
1688 # TODO: Changing contents of store outside transaction
1688 # TODO: Changing contents of store outside transaction
1689 # causes inconsistency. We should make in-memory store
1689 # causes inconsistency. We should make in-memory store
1690 # changes detectable, and abort if changed.
1690 # changes detectable, and abort if changed.
1691 self.store.invalidatecaches()
1691 self.store.invalidatecaches()
1692
1692
1693 def invalidateall(self):
1693 def invalidateall(self):
1694 '''Fully invalidates both store and non-store parts, causing the
1694 '''Fully invalidates both store and non-store parts, causing the
1695 subsequent operation to reread any outside changes.'''
1695 subsequent operation to reread any outside changes.'''
1696 # extensions should hook this to invalidate their caches
1696 # extensions should hook this to invalidate their caches
1697 self.invalidate()
1697 self.invalidate()
1698 self.invalidatedirstate()
1698 self.invalidatedirstate()
1699
1699
1700 @unfilteredmethod
1700 @unfilteredmethod
1701 def _refreshfilecachestats(self, tr):
1701 def _refreshfilecachestats(self, tr):
1702 """Reload stats of cached files so that they are flagged as valid"""
1702 """Reload stats of cached files so that they are flagged as valid"""
1703 for k, ce in self._filecache.items():
1703 for k, ce in self._filecache.items():
1704 k = pycompat.sysstr(k)
1704 k = pycompat.sysstr(k)
1705 if k == r'dirstate' or k not in self.__dict__:
1705 if k == r'dirstate' or k not in self.__dict__:
1706 continue
1706 continue
1707 ce.refresh()
1707 ce.refresh()
1708
1708
1709 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1709 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1710 inheritchecker=None, parentenvvar=None):
1710 inheritchecker=None, parentenvvar=None):
1711 parentlock = None
1711 parentlock = None
1712 # the contents of parentenvvar are used by the underlying lock to
1712 # the contents of parentenvvar are used by the underlying lock to
1713 # determine whether it can be inherited
1713 # determine whether it can be inherited
1714 if parentenvvar is not None:
1714 if parentenvvar is not None:
1715 parentlock = encoding.environ.get(parentenvvar)
1715 parentlock = encoding.environ.get(parentenvvar)
1716
1716
1717 timeout = 0
1717 timeout = 0
1718 warntimeout = 0
1718 warntimeout = 0
1719 if wait:
1719 if wait:
1720 timeout = self.ui.configint("ui", "timeout")
1720 timeout = self.ui.configint("ui", "timeout")
1721 warntimeout = self.ui.configint("ui", "timeout.warn")
1721 warntimeout = self.ui.configint("ui", "timeout.warn")
1722 # internal config: ui.signal-safe-lock
1722 # internal config: ui.signal-safe-lock
1723 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
1723 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
1724
1724
1725 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1725 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1726 releasefn=releasefn,
1726 releasefn=releasefn,
1727 acquirefn=acquirefn, desc=desc,
1727 acquirefn=acquirefn, desc=desc,
1728 inheritchecker=inheritchecker,
1728 inheritchecker=inheritchecker,
1729 parentlock=parentlock,
1729 parentlock=parentlock,
1730 signalsafe=signalsafe)
1730 signalsafe=signalsafe)
1731 return l
1731 return l
1732
1732
1733 def _afterlock(self, callback):
1733 def _afterlock(self, callback):
1734 """add a callback to be run when the repository is fully unlocked
1734 """add a callback to be run when the repository is fully unlocked
1735
1735
1736 The callback will be executed when the outermost lock is released
1736 The callback will be executed when the outermost lock is released
1737 (with wlock being higher level than 'lock')."""
1737 (with wlock being higher level than 'lock')."""
1738 for ref in (self._wlockref, self._lockref):
1738 for ref in (self._wlockref, self._lockref):
1739 l = ref and ref()
1739 l = ref and ref()
1740 if l and l.held:
1740 if l and l.held:
1741 l.postrelease.append(callback)
1741 l.postrelease.append(callback)
1742 break
1742 break
1743 else: # no lock has been found.
1743 else: # no lock has been found.
1744 callback()
1744 callback()
1745
1745
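# A minimal usage sketch of _afterlock (editor's illustration; the helper
# name below is hypothetical, not part of this change). The callback runs
# once the outermost lock is released, or immediately if no lock is held,
# mirroring the commithook registration in commit() further down.
def _sketch_afterlock_usage(repo):
    def notify():
        repo.ui.debug('repository fully unlocked\n')
    repo._afterlock(notify)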
1746 def lock(self, wait=True):
1746 def lock(self, wait=True):
1747 '''Lock the repository store (.hg/store) and return a weak reference
1747 '''Lock the repository store (.hg/store) and return a weak reference
1748 to the lock. Use this before modifying the store (e.g. committing or
1748 to the lock. Use this before modifying the store (e.g. committing or
1749 stripping). If you are opening a transaction, get a lock as well.
1749 stripping). If you are opening a transaction, get a lock as well.
1750
1750
1751 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1751 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1752 'wlock' first to avoid a dead-lock hazard.'''
1752 'wlock' first to avoid a dead-lock hazard.'''
1753 l = self._currentlock(self._lockref)
1753 l = self._currentlock(self._lockref)
1754 if l is not None:
1754 if l is not None:
1755 l.lock()
1755 l.lock()
1756 return l
1756 return l
1757
1757
1758 l = self._lock(self.svfs, "lock", wait, None,
1758 l = self._lock(self.svfs, "lock", wait, None,
1759 self.invalidate, _('repository %s') % self.origroot)
1759 self.invalidate, _('repository %s') % self.origroot)
1760 self._lockref = weakref.ref(l)
1760 self._lockref = weakref.ref(l)
1761 return l
1761 return l
1762
1762
1763 def _wlockchecktransaction(self):
1763 def _wlockchecktransaction(self):
1764 if self.currenttransaction() is not None:
1764 if self.currenttransaction() is not None:
1765 raise error.LockInheritanceContractViolation(
1765 raise error.LockInheritanceContractViolation(
1766 'wlock cannot be inherited in the middle of a transaction')
1766 'wlock cannot be inherited in the middle of a transaction')
1767
1767
1768 def wlock(self, wait=True):
1768 def wlock(self, wait=True):
1769 '''Lock the non-store parts of the repository (everything under
1769 '''Lock the non-store parts of the repository (everything under
1770 .hg except .hg/store) and return a weak reference to the lock.
1770 .hg except .hg/store) and return a weak reference to the lock.
1771
1771
1772 Use this before modifying files in .hg.
1772 Use this before modifying files in .hg.
1773
1773
1774 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1774 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1775 'wlock' first to avoid a dead-lock hazard.'''
1775 'wlock' first to avoid a dead-lock hazard.'''
1776 l = self._wlockref and self._wlockref()
1776 l = self._wlockref and self._wlockref()
1777 if l is not None and l.held:
1777 if l is not None and l.held:
1778 l.lock()
1778 l.lock()
1779 return l
1779 return l
1780
1780
1781 # We do not need to check for non-waiting lock acquisition. Such
1781 # We do not need to check for non-waiting lock acquisition. Such
1782 # acquisition would not cause a dead-lock, as it would just fail.
1782 # acquisition would not cause a dead-lock, as it would just fail.
1783 if wait and (self.ui.configbool('devel', 'all-warnings')
1783 if wait and (self.ui.configbool('devel', 'all-warnings')
1784 or self.ui.configbool('devel', 'check-locks')):
1784 or self.ui.configbool('devel', 'check-locks')):
1785 if self._currentlock(self._lockref) is not None:
1785 if self._currentlock(self._lockref) is not None:
1786 self.ui.develwarn('"wlock" acquired after "lock"')
1786 self.ui.develwarn('"wlock" acquired after "lock"')
1787
1787
1788 def unlock():
1788 def unlock():
1789 if self.dirstate.pendingparentchange():
1789 if self.dirstate.pendingparentchange():
1790 self.dirstate.invalidate()
1790 self.dirstate.invalidate()
1791 else:
1791 else:
1792 self.dirstate.write(None)
1792 self.dirstate.write(None)
1793
1793
1794 self._filecache['dirstate'].refresh()
1794 self._filecache['dirstate'].refresh()
1795
1795
1796 l = self._lock(self.vfs, "wlock", wait, unlock,
1796 l = self._lock(self.vfs, "wlock", wait, unlock,
1797 self.invalidatedirstate, _('working directory of %s') %
1797 self.invalidatedirstate, _('working directory of %s') %
1798 self.origroot,
1798 self.origroot,
1799 inheritchecker=self._wlockchecktransaction,
1799 inheritchecker=self._wlockchecktransaction,
1800 parentenvvar='HG_WLOCK_LOCKER')
1800 parentenvvar='HG_WLOCK_LOCKER')
1801 self._wlockref = weakref.ref(l)
1801 self._wlockref = weakref.ref(l)
1802 return l
1802 return l
1803
1803
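# Editor's sketch of the lock-ordering contract stated in the docstrings
# above (the helper name is hypothetical, not part of this change): take
# 'wlock' before 'lock', and release through lockmod's release() as
# commit() does below.
def _sketch_lock_ordering(repo):
    wlock = lock = None
    try:
        wlock = repo.wlock()  # working-copy lock always first
        lock = repo.lock()    # store lock second
        # ... modify the store and working directory here ...
    finally:
        release(lock, wlock)  # release in reverse acquisition order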
1804 def _currentlock(self, lockref):
1804 def _currentlock(self, lockref):
1805 """Returns the lock if it's held, or None if it's not."""
1805 """Returns the lock if it's held, or None if it's not."""
1806 if lockref is None:
1806 if lockref is None:
1807 return None
1807 return None
1808 l = lockref()
1808 l = lockref()
1809 if l is None or not l.held:
1809 if l is None or not l.held:
1810 return None
1810 return None
1811 return l
1811 return l
1812
1812
1813 def currentwlock(self):
1813 def currentwlock(self):
1814 """Returns the wlock if it's held, or None if it's not."""
1814 """Returns the wlock if it's held, or None if it's not."""
1815 return self._currentlock(self._wlockref)
1815 return self._currentlock(self._wlockref)
1816
1816
1817 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1817 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1818 """
1818 """
1819 commit an individual file as part of a larger transaction
1819 commit an individual file as part of a larger transaction
1820 """
1820 """
1821
1821
1822 fname = fctx.path()
1822 fname = fctx.path()
1823 fparent1 = manifest1.get(fname, nullid)
1823 fparent1 = manifest1.get(fname, nullid)
1824 fparent2 = manifest2.get(fname, nullid)
1824 fparent2 = manifest2.get(fname, nullid)
1825 if isinstance(fctx, context.filectx):
1825 if isinstance(fctx, context.filectx):
1826 node = fctx.filenode()
1826 node = fctx.filenode()
1827 if node in [fparent1, fparent2]:
1827 if node in [fparent1, fparent2]:
1828 self.ui.debug('reusing %s filelog entry\n' % fname)
1828 self.ui.debug('reusing %s filelog entry\n' % fname)
1829 if manifest1.flags(fname) != fctx.flags():
1829 if manifest1.flags(fname) != fctx.flags():
1830 changelist.append(fname)
1830 changelist.append(fname)
1831 return node
1831 return node
1832
1832
1833 flog = self.file(fname)
1833 flog = self.file(fname)
1834 meta = {}
1834 meta = {}
1835 copy = fctx.renamed()
1835 copy = fctx.renamed()
1836 if copy and copy[0] != fname:
1836 if copy and copy[0] != fname:
1837 # Mark the new revision of this file as a copy of another
1837 # Mark the new revision of this file as a copy of another
1838 # file. This copy data will effectively act as a parent
1838 # file. This copy data will effectively act as a parent
1839 # of this new revision. If this is a merge, the first
1839 # of this new revision. If this is a merge, the first
1840 # parent will be the nullid (meaning "look up the copy data")
1840 # parent will be the nullid (meaning "look up the copy data")
1841 # and the second one will be the other parent. For example:
1841 # and the second one will be the other parent. For example:
1842 #
1842 #
1843 # 0 --- 1 --- 3 rev1 changes file foo
1843 # 0 --- 1 --- 3 rev1 changes file foo
1844 # \ / rev2 renames foo to bar and changes it
1844 # \ / rev2 renames foo to bar and changes it
1845 # \- 2 -/ rev3 should have bar with all changes and
1845 # \- 2 -/ rev3 should have bar with all changes and
1846 # should record that bar descends from
1846 # should record that bar descends from
1847 # bar in rev2 and foo in rev1
1847 # bar in rev2 and foo in rev1
1848 #
1848 #
1849 # this allows this merge to succeed:
1849 # this allows this merge to succeed:
1850 #
1850 #
1851 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1851 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1852 # \ / merging rev3 and rev4 should use bar@rev2
1852 # \ / merging rev3 and rev4 should use bar@rev2
1853 # \- 2 --- 4 as the merge base
1853 # \- 2 --- 4 as the merge base
1854 #
1854 #
1855
1855
1856 cfname = copy[0]
1856 cfname = copy[0]
1857 crev = manifest1.get(cfname)
1857 crev = manifest1.get(cfname)
1858 newfparent = fparent2
1858 newfparent = fparent2
1859
1859
1860 if manifest2: # branch merge
1860 if manifest2: # branch merge
1861 if fparent2 == nullid or crev is None: # copied on remote side
1861 if fparent2 == nullid or crev is None: # copied on remote side
1862 if cfname in manifest2:
1862 if cfname in manifest2:
1863 crev = manifest2[cfname]
1863 crev = manifest2[cfname]
1864 newfparent = fparent1
1864 newfparent = fparent1
1865
1865
1866 # Here, we used to search backwards through history to try to find
1866 # Here, we used to search backwards through history to try to find
1867 # where the file copy came from if the source of a copy was not in
1867 # where the file copy came from if the source of a copy was not in
1868 # the parent directory. However, this doesn't actually make sense to
1868 # the parent directory. However, this doesn't actually make sense to
1869 # do (what does a copy from something not in your working copy even
1869 # do (what does a copy from something not in your working copy even
1870 # mean?) and it causes bugs (e.g. issue4476). Instead, we will warn
1870 # mean?) and it causes bugs (e.g. issue4476). Instead, we will warn
1871 # the user that copy information was dropped, so if they didn't
1871 # the user that copy information was dropped, so if they didn't
1872 # expect this outcome it can be fixed, but this is the correct
1872 # expect this outcome it can be fixed, but this is the correct
1873 # behavior in this circumstance.
1873 # behavior in this circumstance.
1874
1874
1875 if crev:
1875 if crev:
1876 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1876 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1877 meta["copy"] = cfname
1877 meta["copy"] = cfname
1878 meta["copyrev"] = hex(crev)
1878 meta["copyrev"] = hex(crev)
1879 fparent1, fparent2 = nullid, newfparent
1879 fparent1, fparent2 = nullid, newfparent
1880 else:
1880 else:
1881 self.ui.warn(_("warning: can't find ancestor for '%s' "
1881 self.ui.warn(_("warning: can't find ancestor for '%s' "
1882 "copied from '%s'!\n") % (fname, cfname))
1882 "copied from '%s'!\n") % (fname, cfname))
1883
1883
1884 elif fparent1 == nullid:
1884 elif fparent1 == nullid:
1885 fparent1, fparent2 = fparent2, nullid
1885 fparent1, fparent2 = fparent2, nullid
1886 elif fparent2 != nullid:
1886 elif fparent2 != nullid:
1887 # is one parent an ancestor of the other?
1887 # is one parent an ancestor of the other?
1888 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1888 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1889 if fparent1 in fparentancestors:
1889 if fparent1 in fparentancestors:
1890 fparent1, fparent2 = fparent2, nullid
1890 fparent1, fparent2 = fparent2, nullid
1891 elif fparent2 in fparentancestors:
1891 elif fparent2 in fparentancestors:
1892 fparent2 = nullid
1892 fparent2 = nullid
1893
1893
1894 # is the file changed?
1894 # is the file changed?
1895 text = fctx.data()
1895 text = fctx.data()
1896 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1896 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1897 changelist.append(fname)
1897 changelist.append(fname)
1898 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1898 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1899 # are just the flags changed during merge?
1899 # are just the flags changed during merge?
1900 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1900 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1901 changelist.append(fname)
1901 changelist.append(fname)
1902
1902
1903 return fparent1
1903 return fparent1
1904
1904
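# Editor's note on the rename handling above (illustrative restatement, not
# part of this change): a recorded copy stores its source in the filelog
# entry's metadata and sets the first parent to nullid, which readers treat
# as "look up the copy data", e.g.:
#
#     meta = {'copy': cfname, 'copyrev': hex(crev)}
#     fparent1, fparent2 = nullid, newfparent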
1905 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1905 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1906 """check for commit arguments that aren't committable"""
1906 """check for commit arguments that aren't committable"""
1907 if match.isexact() or match.prefix():
1907 if match.isexact() or match.prefix():
1908 matched = set(status.modified + status.added + status.removed)
1908 matched = set(status.modified + status.added + status.removed)
1909
1909
1910 for f in match.files():
1910 for f in match.files():
1911 f = self.dirstate.normalize(f)
1911 f = self.dirstate.normalize(f)
1912 if f == '.' or f in matched or f in wctx.substate:
1912 if f == '.' or f in matched or f in wctx.substate:
1913 continue
1913 continue
1914 if f in status.deleted:
1914 if f in status.deleted:
1915 fail(f, _('file not found!'))
1915 fail(f, _('file not found!'))
1916 if f in vdirs: # visited directory
1916 if f in vdirs: # visited directory
1917 d = f + '/'
1917 d = f + '/'
1918 for mf in matched:
1918 for mf in matched:
1919 if mf.startswith(d):
1919 if mf.startswith(d):
1920 break
1920 break
1921 else:
1921 else:
1922 fail(f, _("no match under directory!"))
1922 fail(f, _("no match under directory!"))
1923 elif f not in self.dirstate:
1923 elif f not in self.dirstate:
1924 fail(f, _("file not tracked!"))
1924 fail(f, _("file not tracked!"))
1925
1925
1926 @unfilteredmethod
1926 @unfilteredmethod
1927 def commit(self, text="", user=None, date=None, match=None, force=False,
1927 def commit(self, text="", user=None, date=None, match=None, force=False,
1928 editor=False, extra=None):
1928 editor=False, extra=None):
1929 """Add a new revision to current repository.
1929 """Add a new revision to current repository.
1930
1930
1931 Revision information is gathered from the working directory,
1931 Revision information is gathered from the working directory,
1932 match can be used to filter the committed files. If editor is
1932 match can be used to filter the committed files. If editor is
1933 supplied, it is called to get a commit message.
1933 supplied, it is called to get a commit message.
1934 """
1934 """
1935 if extra is None:
1935 if extra is None:
1936 extra = {}
1936 extra = {}
1937
1937
1938 def fail(f, msg):
1938 def fail(f, msg):
1939 raise error.Abort('%s: %s' % (f, msg))
1939 raise error.Abort('%s: %s' % (f, msg))
1940
1940
1941 if not match:
1941 if not match:
1942 match = matchmod.always(self.root, '')
1942 match = matchmod.always(self.root, '')
1943
1943
1944 if not force:
1944 if not force:
1945 vdirs = []
1945 vdirs = []
1946 match.explicitdir = vdirs.append
1946 match.explicitdir = vdirs.append
1947 match.bad = fail
1947 match.bad = fail
1948
1948
1949 wlock = lock = tr = None
1949 wlock = lock = tr = None
1950 try:
1950 try:
1951 wlock = self.wlock()
1951 wlock = self.wlock()
1952 lock = self.lock() # for recent changelog (see issue4368)
1952 lock = self.lock() # for recent changelog (see issue4368)
1953
1953
1954 wctx = self[None]
1954 wctx = self[None]
1955 merge = len(wctx.parents()) > 1
1955 merge = len(wctx.parents()) > 1
1956
1956
1957 if not force and merge and not match.always():
1957 if not force and merge and not match.always():
1958 raise error.Abort(_('cannot partially commit a merge '
1958 raise error.Abort(_('cannot partially commit a merge '
1959 '(do not specify files or patterns)'))
1959 '(do not specify files or patterns)'))
1960
1960
1961 status = self.status(match=match, clean=force)
1961 status = self.status(match=match, clean=force)
1962 if force:
1962 if force:
1963 status.modified.extend(status.clean) # mq may commit clean files
1963 status.modified.extend(status.clean) # mq may commit clean files
1964
1964
1965 # check subrepos
1965 # check subrepos
1966 subs, commitsubs, newstate = subrepoutil.precommit(
1966 subs, commitsubs, newstate = subrepoutil.precommit(
1967 self.ui, wctx, status, match, force=force)
1967 self.ui, wctx, status, match, force=force)
1968
1968
1969 # make sure all explicit patterns are matched
1969 # make sure all explicit patterns are matched
1970 if not force:
1970 if not force:
1971 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1971 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
1972
1972
1973 cctx = context.workingcommitctx(self, status,
1973 cctx = context.workingcommitctx(self, status,
1974 text, user, date, extra)
1974 text, user, date, extra)
1975
1975
1976 # internal config: ui.allowemptycommit
1976 # internal config: ui.allowemptycommit
1977 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1977 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1978 or extra.get('close') or merge or cctx.files()
1978 or extra.get('close') or merge or cctx.files()
1979 or self.ui.configbool('ui', 'allowemptycommit'))
1979 or self.ui.configbool('ui', 'allowemptycommit'))
1980 if not allowemptycommit:
1980 if not allowemptycommit:
1981 return None
1981 return None
1982
1982
1983 if merge and cctx.deleted():
1983 if merge and cctx.deleted():
1984 raise error.Abort(_("cannot commit merge with missing files"))
1984 raise error.Abort(_("cannot commit merge with missing files"))
1985
1985
1986 ms = mergemod.mergestate.read(self)
1986 ms = mergemod.mergestate.read(self)
1987 mergeutil.checkunresolved(ms)
1987 mergeutil.checkunresolved(ms)
1988
1988
1989 if editor:
1989 if editor:
1990 cctx._text = editor(self, cctx, subs)
1990 cctx._text = editor(self, cctx, subs)
1991 edited = (text != cctx._text)
1991 edited = (text != cctx._text)
1992
1992
1993 # Save commit message in case this transaction gets rolled back
1993 # Save commit message in case this transaction gets rolled back
1994 # (e.g. by a pretxncommit hook). Leave the content alone on
1994 # (e.g. by a pretxncommit hook). Leave the content alone on
1995 # the assumption that the user will use the same editor again.
1995 # the assumption that the user will use the same editor again.
1996 msgfn = self.savecommitmessage(cctx._text)
1996 msgfn = self.savecommitmessage(cctx._text)
1997
1997
1998 # commit subs and write new state
1998 # commit subs and write new state
1999 if subs:
1999 if subs:
2000 for s in sorted(commitsubs):
2000 for s in sorted(commitsubs):
2001 sub = wctx.sub(s)
2001 sub = wctx.sub(s)
2002 self.ui.status(_('committing subrepository %s\n') %
2002 self.ui.status(_('committing subrepository %s\n') %
2003 subrepoutil.subrelpath(sub))
2003 subrepoutil.subrelpath(sub))
2004 sr = sub.commit(cctx._text, user, date)
2004 sr = sub.commit(cctx._text, user, date)
2005 newstate[s] = (newstate[s][0], sr)
2005 newstate[s] = (newstate[s][0], sr)
2006 subrepoutil.writestate(self, newstate)
2006 subrepoutil.writestate(self, newstate)
2007
2007
2008 p1, p2 = self.dirstate.parents()
2008 p1, p2 = self.dirstate.parents()
2009 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2009 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2010 try:
2010 try:
2011 self.hook("precommit", throw=True, parent1=hookp1,
2011 self.hook("precommit", throw=True, parent1=hookp1,
2012 parent2=hookp2)
2012 parent2=hookp2)
2013 tr = self.transaction('commit')
2013 tr = self.transaction('commit')
2014 ret = self.commitctx(cctx, True)
2014 ret = self.commitctx(cctx, True)
2015 except: # re-raises
2015 except: # re-raises
2016 if edited:
2016 if edited:
2017 self.ui.write(
2017 self.ui.write(
2018 _('note: commit message saved in %s\n') % msgfn)
2018 _('note: commit message saved in %s\n') % msgfn)
2019 raise
2019 raise
2020 # update bookmarks, dirstate and mergestate
2020 # update bookmarks, dirstate and mergestate
2021 bookmarks.update(self, [p1, p2], ret)
2021 bookmarks.update(self, [p1, p2], ret)
2022 cctx.markcommitted(ret)
2022 cctx.markcommitted(ret)
2023 ms.reset()
2023 ms.reset()
2024 tr.close()
2024 tr.close()
2025
2025
2026 finally:
2026 finally:
2027 lockmod.release(tr, lock, wlock)
2027 lockmod.release(tr, lock, wlock)
2028
2028
2029 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2029 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2030 # hack for commands that use a temporary commit (e.g. histedit):
2030 # hack for commands that use a temporary commit (e.g. histedit):
2031 # the temporary commit may have been stripped before the hook runs
2031 # the temporary commit may have been stripped before the hook runs
2032 if self.changelog.hasnode(ret):
2032 if self.changelog.hasnode(ret):
2033 self.hook("commit", node=node, parent1=parent1,
2033 self.hook("commit", node=node, parent1=parent1,
2034 parent2=parent2)
2034 parent2=parent2)
2035 self._afterlock(commithook)
2035 self._afterlock(commithook)
2036 return ret
2036 return ret
2037
2037
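# Editor's sketch of calling commit() (hypothetical helper and values, not
# part of this change): commit all pending working-directory changes;
# commit() returns None when nothing changed and ui.allowemptycommit is
# disabled.
def _sketch_commit_usage(repo):
    node = repo.commit(text='update docs', user='alice <alice@example.com>')
    if node is None:
        repo.ui.status('nothing changed\n')
    return node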
2038 @unfilteredmethod
2038 @unfilteredmethod
2039 def commitctx(self, ctx, error=False):
2039 def commitctx(self, ctx, error=False):
2040 """Add a new revision to current repository.
2040 """Add a new revision to current repository.
2041 Revision information is passed via the context argument.
2041 Revision information is passed via the context argument.
2042 """
2042 """
2043
2043
2044 tr = None
2044 tr = None
2045 p1, p2 = ctx.p1(), ctx.p2()
2045 p1, p2 = ctx.p1(), ctx.p2()
2046 user = ctx.user()
2046 user = ctx.user()
2047
2047
2048 lock = self.lock()
2048 lock = self.lock()
2049 try:
2049 try:
2050 tr = self.transaction("commit")
2050 tr = self.transaction("commit")
2051 trp = weakref.proxy(tr)
2051 trp = weakref.proxy(tr)
2052
2052
2053 if ctx.manifestnode():
2053 if ctx.manifestnode():
2054 # reuse an existing manifest revision
2054 # reuse an existing manifest revision
2055 self.ui.debug('reusing known manifest\n')
2055 mn = ctx.manifestnode()
2056 mn = ctx.manifestnode()
2056 files = ctx.files()
2057 files = ctx.files()
2057 elif ctx.files():
2058 elif ctx.files():
2058 m1ctx = p1.manifestctx()
2059 m1ctx = p1.manifestctx()
2059 m2ctx = p2.manifestctx()
2060 m2ctx = p2.manifestctx()
2060 mctx = m1ctx.copy()
2061 mctx = m1ctx.copy()
2061
2062
2062 m = mctx.read()
2063 m = mctx.read()
2063 m1 = m1ctx.read()
2064 m1 = m1ctx.read()
2064 m2 = m2ctx.read()
2065 m2 = m2ctx.read()
2065
2066
2066 # check in files
2067 # check in files
2067 added = []
2068 added = []
2068 changed = []
2069 changed = []
2069 removed = list(ctx.removed())
2070 removed = list(ctx.removed())
2070 linkrev = len(self)
2071 linkrev = len(self)
2071 self.ui.note(_("committing files:\n"))
2072 self.ui.note(_("committing files:\n"))
2072 for f in sorted(ctx.modified() + ctx.added()):
2073 for f in sorted(ctx.modified() + ctx.added()):
2073 self.ui.note(f + "\n")
2074 self.ui.note(f + "\n")
2074 try:
2075 try:
2075 fctx = ctx[f]
2076 fctx = ctx[f]
2076 if fctx is None:
2077 if fctx is None:
2077 removed.append(f)
2078 removed.append(f)
2078 else:
2079 else:
2079 added.append(f)
2080 added.append(f)
2080 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2081 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2081 trp, changed)
2082 trp, changed)
2082 m.setflag(f, fctx.flags())
2083 m.setflag(f, fctx.flags())
2083 except OSError as inst:
2084 except OSError as inst:
2084 self.ui.warn(_("trouble committing %s!\n") % f)
2085 self.ui.warn(_("trouble committing %s!\n") % f)
2085 raise
2086 raise
2086 except IOError as inst:
2087 except IOError as inst:
2087 errcode = getattr(inst, 'errno', errno.ENOENT)
2088 errcode = getattr(inst, 'errno', errno.ENOENT)
2088 if error or errcode and errcode != errno.ENOENT:
2089 if error or errcode and errcode != errno.ENOENT:
2089 self.ui.warn(_("trouble committing %s!\n") % f)
2090 self.ui.warn(_("trouble committing %s!\n") % f)
2090 raise
2091 raise
2091
2092
2092 # update manifest
2093 # update manifest
2093 self.ui.note(_("committing manifest\n"))
2094 self.ui.note(_("committing manifest\n"))
2094 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2095 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2095 drop = [f for f in removed if f in m]
2096 drop = [f for f in removed if f in m]
2096 for f in drop:
2097 for f in drop:
2097 del m[f]
2098 del m[f]
2098 mn = mctx.write(trp, linkrev,
2099 mn = mctx.write(trp, linkrev,
2099 p1.manifestnode(), p2.manifestnode(),
2100 p1.manifestnode(), p2.manifestnode(),
2100 added, drop)
2101 added, drop)
2101 files = changed + removed
2102 files = changed + removed
2102 else:
2103 else:
2104 self.ui.debug('reusing manifest from p1 (no file change)\n')
2103 mn = p1.manifestnode()
2105 mn = p1.manifestnode()
2104 files = []
2106 files = []
2105
2107
2106 # update changelog
2108 # update changelog
2107 self.ui.note(_("committing changelog\n"))
2109 self.ui.note(_("committing changelog\n"))
2108 self.changelog.delayupdate(tr)
2110 self.changelog.delayupdate(tr)
2109 n = self.changelog.add(mn, files, ctx.description(),
2111 n = self.changelog.add(mn, files, ctx.description(),
2110 trp, p1.node(), p2.node(),
2112 trp, p1.node(), p2.node(),
2111 user, ctx.date(), ctx.extra().copy())
2113 user, ctx.date(), ctx.extra().copy())
2112 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2114 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2113 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2115 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2114 parent2=xp2)
2116 parent2=xp2)
2115 # set the new commit in its proper phase
2117 # set the new commit in its proper phase
2116 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2118 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2117 if targetphase:
2119 if targetphase:
2118 # retracting the boundary does not alter the parent changeset.
2120 # retracting the boundary does not alter the parent changeset.
2119 # if a parent has a higher phase, the resulting phase will
2121 # if a parent has a higher phase, the resulting phase will
2120 # be compliant anyway
2122 # be compliant anyway
2121 #
2123 #
2122 # if minimal phase was 0 we don't need to retract anything
2124 # if minimal phase was 0 we don't need to retract anything
2123 phases.registernew(self, tr, targetphase, [n])
2125 phases.registernew(self, tr, targetphase, [n])
2124 tr.close()
2126 tr.close()
2125 return n
2127 return n
2126 finally:
2128 finally:
2127 if tr:
2129 if tr:
2128 tr.release()
2130 tr.release()
2129 lock.release()
2131 lock.release()
2130
2132
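# Editor's summary of the three manifest paths in commitctx() above: an
# explicit ctx.manifestnode() is reused verbatim ('reusing known manifest'),
# changed files produce a fresh manifest revision via mctx.write(), and a
# changeset touching no files reuses p1's manifest node -- the case the
# newly added 'reusing manifest from p1' debug message makes visible.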
2131 @unfilteredmethod
2133 @unfilteredmethod
2132 def destroying(self):
2134 def destroying(self):
2133 '''Inform the repository that nodes are about to be destroyed.
2135 '''Inform the repository that nodes are about to be destroyed.
2134 Intended for use by strip and rollback, so there's a common
2136 Intended for use by strip and rollback, so there's a common
2135 place for anything that has to be done before destroying history.
2137 place for anything that has to be done before destroying history.
2136
2138
2137 This is mostly useful for saving state that is in memory and waiting
2139 This is mostly useful for saving state that is in memory and waiting
2138 to be flushed when the current lock is released. Because a call to
2140 to be flushed when the current lock is released. Because a call to
2139 destroyed is imminent, the repo will be invalidated, causing those
2141 destroyed is imminent, the repo will be invalidated, causing those
2140 changes to stay in memory (waiting for the next unlock), or vanish
2142 changes to stay in memory (waiting for the next unlock), or vanish
2141 completely.
2143 completely.
2142 '''
2144 '''
2143 # When using the same lock to commit and strip, the phasecache is left
2145 # When using the same lock to commit and strip, the phasecache is left
2144 # dirty after committing. Then when we strip, the repo is invalidated,
2146 # dirty after committing. Then when we strip, the repo is invalidated,
2145 # causing those changes to disappear.
2147 # causing those changes to disappear.
2146 if '_phasecache' in vars(self):
2148 if '_phasecache' in vars(self):
2147 self._phasecache.write()
2149 self._phasecache.write()
2148
2150
2149 @unfilteredmethod
2151 @unfilteredmethod
2150 def destroyed(self):
2152 def destroyed(self):
2151 '''Inform the repository that nodes have been destroyed.
2153 '''Inform the repository that nodes have been destroyed.
2152 Intended for use by strip and rollback, so there's a common
2154 Intended for use by strip and rollback, so there's a common
2153 place for anything that has to be done after destroying history.
2155 place for anything that has to be done after destroying history.
2154 '''
2156 '''
2155 # When one tries to:
2157 # When one tries to:
2156 # 1) destroy nodes thus calling this method (e.g. strip)
2158 # 1) destroy nodes thus calling this method (e.g. strip)
2157 # 2) use phasecache somewhere (e.g. commit)
2159 # 2) use phasecache somewhere (e.g. commit)
2158 #
2160 #
2159 # then 2) will fail because the phasecache contains nodes that were
2161 # then 2) will fail because the phasecache contains nodes that were
2160 # removed. We can either remove phasecache from the filecache,
2162 # removed. We can either remove phasecache from the filecache,
2161 # causing it to reload next time it is accessed, or simply filter
2163 # causing it to reload next time it is accessed, or simply filter
2162 # the removed nodes now and write the updated cache.
2164 # the removed nodes now and write the updated cache.
2163 self._phasecache.filterunknown(self)
2165 self._phasecache.filterunknown(self)
2164 self._phasecache.write()
2166 self._phasecache.write()
2165
2167
2166 # refresh all repository caches
2168 # refresh all repository caches
2167 self.updatecaches()
2169 self.updatecaches()
2168
2170
2169 # Ensure the persistent tag cache is updated. Doing it now
2171 # Ensure the persistent tag cache is updated. Doing it now
2170 # means that the tag cache only has to worry about destroyed
2172 # means that the tag cache only has to worry about destroyed
2171 # heads immediately after a strip/rollback. That in turn
2173 # heads immediately after a strip/rollback. That in turn
2172 # guarantees that "cachetip == currenttip" (comparing both rev
2174 # guarantees that "cachetip == currenttip" (comparing both rev
2173 # and node) always means no nodes have been added or destroyed.
2175 # and node) always means no nodes have been added or destroyed.
2174
2176
2175 # XXX this is suboptimal when qrefresh'ing: we strip the current
2177 # XXX this is suboptimal when qrefresh'ing: we strip the current
2176 # head, refresh the tag cache, then immediately add a new head.
2178 # head, refresh the tag cache, then immediately add a new head.
2177 # But I think doing it this way is necessary for the "instant
2179 # But I think doing it this way is necessary for the "instant
2178 # tag cache retrieval" case to work.
2180 # tag cache retrieval" case to work.
2179 self.invalidate()
2181 self.invalidate()
2180
2182
2181 def status(self, node1='.', node2=None, match=None,
2183 def status(self, node1='.', node2=None, match=None,
2182 ignored=False, clean=False, unknown=False,
2184 ignored=False, clean=False, unknown=False,
2183 listsubrepos=False):
2185 listsubrepos=False):
2184 '''a convenience method that calls node1.status(node2)'''
2186 '''a convenience method that calls node1.status(node2)'''
2185 return self[node1].status(node2, match, ignored, clean, unknown,
2187 return self[node1].status(node2, match, ignored, clean, unknown,
2186 listsubrepos)
2188 listsubrepos)
2187
2189
2188 def addpostdsstatus(self, ps):
2190 def addpostdsstatus(self, ps):
2189 """Add a callback to run within the wlock, at the point at which status
2191 """Add a callback to run within the wlock, at the point at which status
2190 fixups happen.
2192 fixups happen.
2191
2193
2192 On status completion, callback(wctx, status) will be called with the
2194 On status completion, callback(wctx, status) will be called with the
2193 wlock held, unless the dirstate has changed from underneath or the wlock
2195 wlock held, unless the dirstate has changed from underneath or the wlock
2194 couldn't be grabbed.
2196 couldn't be grabbed.
2195
2197
2196 Callbacks should not capture and use a cached copy of the dirstate --
2198 Callbacks should not capture and use a cached copy of the dirstate --
2197 it might change in the meanwhile. Instead, they should access the
2199 it might change in the meanwhile. Instead, they should access the
2198 dirstate via wctx.repo().dirstate.
2200 dirstate via wctx.repo().dirstate.
2199
2201
2200 This list is emptied out after each status run -- extensions should
2202 This list is emptied out after each status run -- extensions should
2201 make sure it adds to this list each time dirstate.status is called.
2203 make sure it adds to this list each time dirstate.status is called.
2202 Extensions should also make sure they don't call this for statuses
2204 Extensions should also make sure they don't call this for statuses
2203 that don't involve the dirstate.
2205 that don't involve the dirstate.
2204 """
2206 """
2205
2207
2206 # The list is located here for uniqueness reasons -- it is actually
2208 # The list is located here for uniqueness reasons -- it is actually
2207 # managed by the workingctx, but that isn't unique per-repo.
2209 # managed by the workingctx, but that isn't unique per-repo.
2208 self._postdsstatus.append(ps)
2210 self._postdsstatus.append(ps)
2209
2211
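# Editor's sketch of a post-dirstate-status callback per the contract
# documented above (hypothetical names, not part of this change): always
# reach the dirstate through wctx.repo().dirstate, never a cached copy.
def _sketch_postdsstatus(repo):
    def fixup(wctx, status):
        wctx.repo().ui.debug('%d files modified after fixups\n'
                             % len(status.modified))
    repo.addpostdsstatus(fixup)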
2210 def postdsstatus(self):
2212 def postdsstatus(self):
2211 """Used by workingctx to get the list of post-dirstate-status hooks."""
2213 """Used by workingctx to get the list of post-dirstate-status hooks."""
2212 return self._postdsstatus
2214 return self._postdsstatus
2213
2215
2214 def clearpostdsstatus(self):
2216 def clearpostdsstatus(self):
2215 """Used by workingctx to clear post-dirstate-status hooks."""
2217 """Used by workingctx to clear post-dirstate-status hooks."""
2216 del self._postdsstatus[:]
2218 del self._postdsstatus[:]
2217
2219
2218 def heads(self, start=None):
2220 def heads(self, start=None):
2219 if start is None:
2221 if start is None:
2220 cl = self.changelog
2222 cl = self.changelog
2221 headrevs = reversed(cl.headrevs())
2223 headrevs = reversed(cl.headrevs())
2222 return [cl.node(rev) for rev in headrevs]
2224 return [cl.node(rev) for rev in headrevs]
2223
2225
2224 heads = self.changelog.heads(start)
2226 heads = self.changelog.heads(start)
2225 # sort the output in rev descending order
2227 # sort the output in rev descending order
2226 return sorted(heads, key=self.changelog.rev, reverse=True)
2228 return sorted(heads, key=self.changelog.rev, reverse=True)
2227
2229
2228 def branchheads(self, branch=None, start=None, closed=False):
2230 def branchheads(self, branch=None, start=None, closed=False):
2229 '''return a (possibly filtered) list of heads for the given branch
2231 '''return a (possibly filtered) list of heads for the given branch
2230
2232
2231 Heads are returned in topological order, from newest to oldest.
2233 Heads are returned in topological order, from newest to oldest.
2232 If branch is None, use the dirstate branch.
2234 If branch is None, use the dirstate branch.
2233 If start is not None, return only heads reachable from start.
2235 If start is not None, return only heads reachable from start.
2234 If closed is True, return heads that are marked as closed as well.
2236 If closed is True, return heads that are marked as closed as well.
2235 '''
2237 '''
2236 if branch is None:
2238 if branch is None:
2237 branch = self[None].branch()
2239 branch = self[None].branch()
2238 branches = self.branchmap()
2240 branches = self.branchmap()
2239 if branch not in branches:
2241 if branch not in branches:
2240 return []
2242 return []
2241 # the cache returns heads ordered lowest to highest
2243 # the cache returns heads ordered lowest to highest
2242 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2244 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2243 if start is not None:
2245 if start is not None:
2244 # filter out the heads that cannot be reached from startrev
2246 # filter out the heads that cannot be reached from startrev
2245 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2247 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2246 bheads = [h for h in bheads if h in fbheads]
2248 bheads = [h for h in bheads if h in fbheads]
2247 return bheads
2249 return bheads
2248
2250
2249 def branches(self, nodes):
2251 def branches(self, nodes):
2250 if not nodes:
2252 if not nodes:
2251 nodes = [self.changelog.tip()]
2253 nodes = [self.changelog.tip()]
2252 b = []
2254 b = []
2253 for n in nodes:
2255 for n in nodes:
2254 t = n
2256 t = n
2255 while True:
2257 while True:
2256 p = self.changelog.parents(n)
2258 p = self.changelog.parents(n)
2257 if p[1] != nullid or p[0] == nullid:
2259 if p[1] != nullid or p[0] == nullid:
2258 b.append((t, n, p[0], p[1]))
2260 b.append((t, n, p[0], p[1]))
2259 break
2261 break
2260 n = p[0]
2262 n = p[0]
2261 return b
2263 return b
2262
2264
2263 def between(self, pairs):
2265 def between(self, pairs):
2264 r = []
2266 r = []
2265
2267
2266 for top, bottom in pairs:
2268 for top, bottom in pairs:
2267 n, l, i = top, [], 0
2269 n, l, i = top, [], 0
2268 f = 1
2270 f = 1
2269
2271
2270 while n != bottom and n != nullid:
2272 while n != bottom and n != nullid:
2271 p = self.changelog.parents(n)[0]
2273 p = self.changelog.parents(n)[0]
2272 if i == f:
2274 if i == f:
2273 l.append(n)
2275 l.append(n)
2274 f = f * 2
2276 f = f * 2
2275 n = p
2277 n = p
2276 i += 1
2278 i += 1
2277
2279
2278 r.append(l)
2280 r.append(l)
2279
2281
2280 return r
2282 return r
2281
2283
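# Editor's note on between() above: each top..bottom chain is sampled at
# exponentially growing distances from top (i == 1, 2, 4, 8, ...), so a
# linear chain of n changesets contributes only O(log n) nodes per pair --
# e.g. for n >= 8 the appended nodes sit 1, 2, 4 and 8 steps below top.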
2282 def checkpush(self, pushop):
2284 def checkpush(self, pushop):
2283 """Extensions can override this function if additional checks have
2285 """Extensions can override this function if additional checks have
2284 to be performed before pushing, or call it if they override push
2286 to be performed before pushing, or call it if they override push
2285 command.
2287 command.
2286 """
2288 """
2287
2289
2288 @unfilteredpropertycache
2290 @unfilteredpropertycache
2289 def prepushoutgoinghooks(self):
2291 def prepushoutgoinghooks(self):
2290 """Return util.hooks consists of a pushop with repo, remote, outgoing
2292 """Return util.hooks consists of a pushop with repo, remote, outgoing
2291 methods, which are called before pushing changesets.
2293 methods, which are called before pushing changesets.
2292 """
2294 """
2293 return util.hooks()
2295 return util.hooks()
2294
2296
2295 def pushkey(self, namespace, key, old, new):
2297 def pushkey(self, namespace, key, old, new):
2296 try:
2298 try:
2297 tr = self.currenttransaction()
2299 tr = self.currenttransaction()
2298 hookargs = {}
2300 hookargs = {}
2299 if tr is not None:
2301 if tr is not None:
2300 hookargs.update(tr.hookargs)
2302 hookargs.update(tr.hookargs)
2301 hookargs = pycompat.strkwargs(hookargs)
2303 hookargs = pycompat.strkwargs(hookargs)
2302 hookargs[r'namespace'] = namespace
2304 hookargs[r'namespace'] = namespace
2303 hookargs[r'key'] = key
2305 hookargs[r'key'] = key
2304 hookargs[r'old'] = old
2306 hookargs[r'old'] = old
2305 hookargs[r'new'] = new
2307 hookargs[r'new'] = new
2306 self.hook('prepushkey', throw=True, **hookargs)
2308 self.hook('prepushkey', throw=True, **hookargs)
2307 except error.HookAbort as exc:
2309 except error.HookAbort as exc:
2308 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2310 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2309 if exc.hint:
2311 if exc.hint:
2310 self.ui.write_err(_("(%s)\n") % exc.hint)
2312 self.ui.write_err(_("(%s)\n") % exc.hint)
2311 return False
2313 return False
2312 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2314 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2313 ret = pushkey.push(self, namespace, key, old, new)
2315 ret = pushkey.push(self, namespace, key, old, new)
2314 def runhook():
2316 def runhook():
2315 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2317 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2316 ret=ret)
2318 ret=ret)
2317 self._afterlock(runhook)
2319 self._afterlock(runhook)
2318 return ret
2320 return ret
2319
2321
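# Editor's note on pushkey() above: an aborting 'prepushkey' hook is
# reported as "pushkey-abort: ..." and becomes a False return rather than
# an exception, while the after-the-fact 'pushkey' hook is deferred via
# _afterlock() so it only fires once all locks are released.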
2320 def listkeys(self, namespace):
2322 def listkeys(self, namespace):
2321 self.hook('prelistkeys', throw=True, namespace=namespace)
2323 self.hook('prelistkeys', throw=True, namespace=namespace)
2322 self.ui.debug('listing keys for "%s"\n' % namespace)
2324 self.ui.debug('listing keys for "%s"\n' % namespace)
2323 values = pushkey.list(self, namespace)
2325 values = pushkey.list(self, namespace)
2324 self.hook('listkeys', namespace=namespace, values=values)
2326 self.hook('listkeys', namespace=namespace, values=values)
2325 return values
2327 return values
2326
2328
2327 def debugwireargs(self, one, two, three=None, four=None, five=None):
2329 def debugwireargs(self, one, two, three=None, four=None, five=None):
2328 '''used to test argument passing over the wire'''
2330 '''used to test argument passing over the wire'''
2329 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2331 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2330 pycompat.bytestr(four),
2332 pycompat.bytestr(four),
2331 pycompat.bytestr(five))
2333 pycompat.bytestr(five))
2332
2334
2333 def savecommitmessage(self, text):
2335 def savecommitmessage(self, text):
2334 fp = self.vfs('last-message.txt', 'wb')
2336 fp = self.vfs('last-message.txt', 'wb')
2335 try:
2337 try:
2336 fp.write(text)
2338 fp.write(text)
2337 finally:
2339 finally:
2338 fp.close()
2340 fp.close()
2339 return self.pathto(fp.name[len(self.root) + 1:])
2341 return self.pathto(fp.name[len(self.root) + 1:])
2340
2342
2341 # used to avoid circular references so destructors work
2343 # used to avoid circular references so destructors work
2342 def aftertrans(files):
2344 def aftertrans(files):
2343 renamefiles = [tuple(t) for t in files]
2345 renamefiles = [tuple(t) for t in files]
2344 def a():
2346 def a():
2345 for vfs, src, dest in renamefiles:
2347 for vfs, src, dest in renamefiles:
2346 # if src and dest refer to the same file, vfs.rename is a no-op,
2348 # if src and dest refer to the same file, vfs.rename is a no-op,
2347 # leaving both src and dest on disk; delete dest to make sure
2349 # leaving both src and dest on disk; delete dest to make sure
2348 # the rename couldn't be such a no-op.
2350 # the rename couldn't be such a no-op.
2349 vfs.tryunlink(dest)
2351 vfs.tryunlink(dest)
2350 try:
2352 try:
2351 vfs.rename(src, dest)
2353 vfs.rename(src, dest)
2352 except OSError: # journal file does not yet exist
2354 except OSError: # journal file does not yet exist
2353 pass
2355 pass
2354 return a
2356 return a
2355
2357
2356 def undoname(fn):
2358 def undoname(fn):
2357 base, name = os.path.split(fn)
2359 base, name = os.path.split(fn)
2358 assert name.startswith('journal')
2360 assert name.startswith('journal')
2359 return os.path.join(base, name.replace('journal', 'undo', 1))
2361 return os.path.join(base, name.replace('journal', 'undo', 1))
2360
2362
2361 def instance(ui, path, create, intents=None):
2363 def instance(ui, path, create, intents=None):
2362 return localrepository(ui, util.urllocalpath(path), create,
2364 return localrepository(ui, util.urllocalpath(path), create,
2363 intents=intents)
2365 intents=intents)
2364
2366
2365 def islocal(path):
2367 def islocal(path):
2366 return True
2368 return True
2367
2369
2368 def newreporequirements(repo):
2370 def newreporequirements(repo):
2369 """Determine the set of requirements for a new local repository.
2371 """Determine the set of requirements for a new local repository.
2370
2372
2371 Extensions can wrap this function to specify custom requirements for
2373 Extensions can wrap this function to specify custom requirements for
2372 new repositories.
2374 new repositories.
2373 """
2375 """
2374 ui = repo.ui
2376 ui = repo.ui
2375 requirements = {'revlogv1'}
2377 requirements = {'revlogv1'}
2376 if ui.configbool('format', 'usestore'):
2378 if ui.configbool('format', 'usestore'):
2377 requirements.add('store')
2379 requirements.add('store')
2378 if ui.configbool('format', 'usefncache'):
2380 if ui.configbool('format', 'usefncache'):
2379 requirements.add('fncache')
2381 requirements.add('fncache')
2380 if ui.configbool('format', 'dotencode'):
2382 if ui.configbool('format', 'dotencode'):
2381 requirements.add('dotencode')
2383 requirements.add('dotencode')
2382
2384
2383 compengine = ui.config('experimental', 'format.compression')
2385 compengine = ui.config('experimental', 'format.compression')
2384 if compengine not in util.compengines:
2386 if compengine not in util.compengines:
2385 raise error.Abort(_('compression engine %s defined by '
2387 raise error.Abort(_('compression engine %s defined by '
2386 'experimental.format.compression not available') %
2388 'experimental.format.compression not available') %
2387 compengine,
2389 compengine,
2388 hint=_('run "hg debuginstall" to list available '
2390 hint=_('run "hg debuginstall" to list available '
2389 'compression engines'))
2391 'compression engines'))
2390
2392
2391 # zlib is the historical default and doesn't need an explicit requirement.
2393 # zlib is the historical default and doesn't need an explicit requirement.
2392 if compengine != 'zlib':
2394 if compengine != 'zlib':
2393 requirements.add('exp-compression-%s' % compengine)
2395 requirements.add('exp-compression-%s' % compengine)
2394
2396
2395 if scmutil.gdinitconfig(ui):
2397 if scmutil.gdinitconfig(ui):
2396 requirements.add('generaldelta')
2398 requirements.add('generaldelta')
2397 if ui.configbool('experimental', 'treemanifest'):
2399 if ui.configbool('experimental', 'treemanifest'):
2398 requirements.add('treemanifest')
2400 requirements.add('treemanifest')
2399 # experimental config: format.sparse-revlog
2401 # experimental config: format.sparse-revlog
2400 if ui.configbool('format', 'sparse-revlog'):
2402 if ui.configbool('format', 'sparse-revlog'):
2401 requirements.add(SPARSEREVLOG_REQUIREMENT)
2403 requirements.add(SPARSEREVLOG_REQUIREMENT)
2402
2404
2403 revlogv2 = ui.config('experimental', 'revlogv2')
2405 revlogv2 = ui.config('experimental', 'revlogv2')
2404 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2406 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2405 requirements.remove('revlogv1')
2407 requirements.remove('revlogv1')
2406 # generaldelta is implied by revlogv2.
2408 # generaldelta is implied by revlogv2.
2407 requirements.discard('generaldelta')
2409 requirements.discard('generaldelta')
2408 requirements.add(REVLOGV2_REQUIREMENT)
2410 requirements.add(REVLOGV2_REQUIREMENT)
2409
2411
2410 return requirements
2412 return requirements
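# Editor's sketch of wrapping newreporequirements() from an extension, as
# its docstring invites (extension and requirement names are hypothetical,
# not part of this change):
#
#     from mercurial import extensions, localrepo
#
#     def _wrapreqs(orig, repo):
#         reqs = orig(repo)
#         reqs.add('exp-myextension')
#         return reqs
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo, 'newreporequirements',
#                                 _wrapreqs)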
@@ -1,151 +1,154 b''
1 #require svn svn-bindings
1 #require svn svn-bindings
2
2
3 $ cat >> $HGRCPATH <<EOF
3 $ cat >> $HGRCPATH <<EOF
4 > [extensions]
4 > [extensions]
5 > convert =
5 > convert =
6 > EOF
6 > EOF
7
7
8 $ svnadmin create svn-repo
8 $ svnadmin create svn-repo
9 $ svnadmin load -q svn-repo < "$TESTDIR/svn/encoding.svndump"
9 $ svnadmin load -q svn-repo < "$TESTDIR/svn/encoding.svndump"
10
10
11 Convert while testing all possible outputs
11 Convert while testing all possible outputs
12
12
13 $ hg --debug convert svn-repo A-hg --config progress.debug=1
13 $ hg --debug convert svn-repo A-hg --config progress.debug=1
14 initializing destination A-hg repository
14 initializing destination A-hg repository
15 reparent to file:/*/$TESTTMP/svn-repo (glob)
15 reparent to file:/*/$TESTTMP/svn-repo (glob)
16 run hg sink pre-conversion action
16 run hg sink pre-conversion action
17 scanning source...
17 scanning source...
18 found trunk at 'trunk'
18 found trunk at 'trunk'
19 found tags at 'tags'
19 found tags at 'tags'
20 found branches at 'branches'
20 found branches at 'branches'
21 found branch branch\xc3\xa9 at 5 (esc)
21 found branch branch\xc3\xa9 at 5 (esc)
22 found branch branch\xc3\xa9e at 6 (esc)
22 found branch branch\xc3\xa9e at 6 (esc)
23 scanning: 1/4 revisions (25.00%)
23 scanning: 1/4 revisions (25.00%)
24 reparent to file:/*/$TESTTMP/svn-repo/trunk (glob)
24 reparent to file:/*/$TESTTMP/svn-repo/trunk (glob)
25 fetching revision log for "/trunk" from 4 to 0
25 fetching revision log for "/trunk" from 4 to 0
26 parsing revision 4 (2 changes)
26 parsing revision 4 (2 changes)
27 parsing revision 3 (4 changes)
27 parsing revision 3 (4 changes)
28 parsing revision 2 (3 changes)
28 parsing revision 2 (3 changes)
29 parsing revision 1 (3 changes)
29 parsing revision 1 (3 changes)
30 no copyfrom path, don't know what to do.
30 no copyfrom path, don't know what to do.
31 '/branches' is not under '/trunk', ignoring
31 '/branches' is not under '/trunk', ignoring
32 '/tags' is not under '/trunk', ignoring
 '/tags' is not under '/trunk', ignoring
 scanning: 2/4 revisions (50.00%)
 reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9 (glob)
 fetching revision log for "/branches/branch\xc3\xa9" from 5 to 0 (esc)
 parsing revision 5 (1 changes)
 reparent to file:/*/$TESTTMP/svn-repo (glob)
 reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9 (glob)
 found parent of branch /branches/branch\xc3\xa9 at 4: /trunk (esc)
 scanning: 3/4 revisions (75.00%)
 reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9e (glob)
 fetching revision log for "/branches/branch\xc3\xa9e" from 6 to 0 (esc)
 parsing revision 6 (1 changes)
 reparent to file:/*/$TESTTMP/svn-repo (glob)
 reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9e (glob)
 found parent of branch /branches/branch\xc3\xa9e at 5: /branches/branch\xc3\xa9 (esc)
 scanning: 4/4 revisions (100.00%)
 scanning: 5/4 revisions (125.00%)
 scanning: 6/4 revisions (150.00%)
 sorting...
 converting...
 5 init projA
 source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/trunk@1
 converting: 0/6 revisions (0.00%)
+reusing manifest from p1 (no file change)
 committing changelog
 updating the branch cache
 4 hello
 source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/trunk@2
 converting: 1/6 revisions (16.67%)
 reparent to file:/*/$TESTTMP/svn-repo/trunk (glob)
 scanning paths: /trunk/\xc3\xa0 0/3 paths (0.00%) (esc)
 scanning paths: /trunk/\xc3\xa0/e\xcc\x81 1/3 paths (33.33%) (esc)
 scanning paths: /trunk/\xc3\xa9 2/3 paths (66.67%) (esc)
 committing files:
 \xc3\xa0/e\xcc\x81 (esc)
 getting files: \xc3\xa0/e\xcc\x81 1/2 files (50.00%) (esc)
 \xc3\xa9 (esc)
 getting files: \xc3\xa9 2/2 files (100.00%) (esc)
 committing manifest
 committing changelog
 updating the branch cache
 3 copy files
 source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/trunk@3
 converting: 2/6 revisions (33.33%)
 scanning paths: /trunk/\xc3\xa0 0/4 paths (0.00%) (esc)
 gone from -1
 reparent to file:/*/$TESTTMP/svn-repo (glob)
 reparent to file:/*/$TESTTMP/svn-repo/trunk (glob)
 scanning paths: /trunk/\xc3\xa8 1/4 paths (25.00%) (esc)
 copied to \xc3\xa8 from \xc3\xa9@2 (esc)
 scanning paths: /trunk/\xc3\xa9 2/4 paths (50.00%) (esc)
 gone from -1
 reparent to file:/*/$TESTTMP/svn-repo (glob)
 reparent to file:/*/$TESTTMP/svn-repo/trunk (glob)
 scanning paths: /trunk/\xc3\xb9 3/4 paths (75.00%) (esc)
 mark /trunk/\xc3\xb9 came from \xc3\xa0:2 (esc)
 getting files: \xc3\xa0/e\xcc\x81 1/4 files (25.00%) (esc)
 getting files: \xc3\xa9 2/4 files (50.00%) (esc)
 committing files:
 \xc3\xa8 (esc)
 getting files: \xc3\xa8 3/4 files (75.00%) (esc)
 \xc3\xa8: copy \xc3\xa9:6b67ccefd5ce6de77e7ead4f5292843a0255329f (esc)
 \xc3\xb9/e\xcc\x81 (esc)
 getting files: \xc3\xb9/e\xcc\x81 4/4 files (100.00%) (esc)
 \xc3\xb9/e\xcc\x81: copy \xc3\xa0/e\xcc\x81:a9092a3d84a37b9993b5c73576f6de29b7ea50f6 (esc)
 committing manifest
 committing changelog
 updating the branch cache
 2 remove files
 source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/trunk@4
 converting: 3/6 revisions (50.00%)
 scanning paths: /trunk/\xc3\xa8 0/2 paths (0.00%) (esc)
 gone from -1
 reparent to file:/*/$TESTTMP/svn-repo (glob)
 reparent to file:/*/$TESTTMP/svn-repo/trunk (glob)
 scanning paths: /trunk/\xc3\xb9 1/2 paths (50.00%) (esc)
 gone from -1
 reparent to file:/*/$TESTTMP/svn-repo (glob)
 reparent to file:/*/$TESTTMP/svn-repo/trunk (glob)
 getting files: \xc3\xa8 1/2 files (50.00%) (esc)
 getting files: \xc3\xb9/e\xcc\x81 2/2 files (100.00%) (esc)
 committing files:
 committing manifest
 committing changelog
 updating the branch cache
 1 branch to branch?
 source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/branches/branch?@5
 converting: 4/6 revisions (66.67%)
 reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9 (glob)
 scanning paths: /branches/branch\xc3\xa9 0/1 paths (0.00%) (esc)
+reusing manifest from p1 (no file change)
 committing changelog
 updating the branch cache
 0 branch to branch?e
 source: svn:afeb9c47-92ff-4c0c-9f72-e1f6eb8ac9af/branches/branch?e@6
 converting: 5/6 revisions (83.33%)
 reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9e (glob)
 scanning paths: /branches/branch\xc3\xa9e 0/1 paths (0.00%) (esc)
+reusing manifest from p1 (no file change)
 committing changelog
 updating the branch cache
 reparent to file:/*/$TESTTMP/svn-repo (glob)
 reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9e (glob)
 reparent to file:/*/$TESTTMP/svn-repo (glob)
 reparent to file:/*/$TESTTMP/svn-repo/branches/branch%C3%A9e (glob)
 updating tags
 committing files:
 .hgtags
 committing manifest
 committing changelog
 updating the branch cache
 run hg sink post-conversion action
 $ cd A-hg
 $ hg up
 1 files updated, 0 files merged, 0 files removed, 0 files unresolved

 Check tags are in UTF-8

 $ cat .hgtags
 e94e4422020e715add80525e8f0f46c9968689f1 branch\xc3\xa9e (esc)
 f7e66f98380ed1e53a797c5c7a7a2616a7ab377d branch\xc3\xa9 (esc)

 $ cd ..
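The three "reusing manifest from p1 (no file change)" lines added above (marked with +) are the new debug message introduced by this commit. In each case the converted SVN revision creates a branch without touching any files, so the commit path keeps the parent's manifest node rather than writing a new manifest revision; the reuse optimization itself predates this commit (the old output already had no "committing manifest" line for these revisions), and what is new is only the debug message announcing it. Below is a minimal, self-contained sketch of that decision, assuming nothing about the real localrepo internals: debug_log() and commit_manifest() are hypothetical stand-ins for ui.debug() and the manifest-committing step of the commit path, not the actual code.

    # Minimal sketch of the manifest-reuse fast path exercised above.
    # NOT the real localrepo.commitctx() code; all names here are
    # hypothetical stand-ins.

    def debug_log(msg):
        # stand-in for ui.debug(); prints instead of writing to the ui
        print(msg)

    def commit_manifest(p1_manifest_node, changed_files, removed_files):
        """Return the manifest node to record in the new changeset."""
        if not changed_files and not removed_files:
            # No file changes: p1's manifest still describes the tree
            # exactly, so reuse its node instead of writing a new one.
            debug_log('reusing manifest from p1 (no file change)')
            return p1_manifest_node
        debug_log('committing manifest')
        # ... a real implementation writes a new manifest revision here ...
        return '<new manifest node>'

    # The "branch to branch?" revisions above change no files, so they
    # take the reuse path and keep p1's manifest node:
    assert commit_manifest('<p1 node>', [], []) == '<p1 node>'

Keying the fast path on "no changed or removed files" means an empty changeset (such as a pure branch copy during conversion) costs no redundant manifest revision, and the new debug line makes that reuse visible in the --debug output these test expectations check.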