merge with stable
Augie Fackler
r42541:ffab9eed merge default
@@ -0,0 +1,238 @@
==================================
Test corner cases around bookmarks
==================================

This test file gathers bookmark tests that are specific enough
not to fit anywhere else.

Test bookmark/changelog race condition
======================================

The data from the bookmark file is filtered to only contain bookmarks
whose nodes are known to the changelog. If the cache invalidation between
these two reads goes wrong, bookmarks can be dropped.

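For illustration, here is a minimal sketch of that filtering step (hypothetical
helper names, not Mercurial's actual API):

    def filter_bookmarks(raw_marks, has_node):
        # A bookmark pointing to a node missing from the changelog is dropped.
        # If the changelog was read before a concurrent push landed, perfectly
        # valid bookmarks can be filtered out by mistake; that is the race
        # exercised below.
        return {name: node for name, node in raw_marks.items()
                if has_node(node)}
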
global setup
------------

$ cat >> $HGRCPATH << EOF
> [ui]
> ssh = "$PYTHON" "$TESTDIR/dummyssh"
> [server]
> concurrent-push-mode=check-related
> EOF

Setup
-----

initial repository setup

$ hg init bookrace-server
$ cd bookrace-server
$ echo a > a
$ hg add a
$ hg commit -m root
$ echo a >> a
$ hg bookmark book-A
$ hg commit -m A0
$ hg up 'desc(root)'
1 files updated, 0 files merged, 0 files removed, 0 files unresolved
(leaving bookmark book-A)
$ echo b > b
$ hg add b
$ hg bookmark book-B
$ hg commit -m B0
created new head
$ hg up null
0 files updated, 0 files merged, 2 files removed, 0 files unresolved
(leaving bookmark book-B)
$ hg phase --public --rev 'all()'
$ hg log -G
o  changeset:   2:c79985706978
|  bookmark:    book-B
|  tag:         tip
|  parent:      0:6569b5a81c7e
|  user:        test
|  date:        Thu Jan 01 00:00:00 1970 +0000
|  summary:     B0
|
| o  changeset:   1:39c28d785860
|/   bookmark:    book-A
|    user:        test
|    date:        Thu Jan 01 00:00:00 1970 +0000
|    summary:     A0
|
o  changeset:   0:6569b5a81c7e
   user:        test
   date:        Thu Jan 01 00:00:00 1970 +0000
   summary:     root

$ hg book
   book-A                    1:39c28d785860
   book-B                    2:c79985706978
$ cd ..

Add a new changeset on each bookmark in distinct clones

$ hg clone ssh://user@dummy/bookrace-server client-A
requesting all changes
adding changesets
adding manifests
adding file changes
added 3 changesets with 3 changes to 2 files (+1 heads)
new changesets 6569b5a81c7e:c79985706978
updating to branch default
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg -R client-A update book-A
1 files updated, 0 files merged, 1 files removed, 0 files unresolved
(activating bookmark book-A)
$ echo a >> client-A/a
$ hg -R client-A commit -m A1
$ hg clone ssh://user@dummy/bookrace-server client-B
requesting all changes
adding changesets
adding manifests
adding file changes
added 3 changesets with 3 changes to 2 files (+1 heads)
new changesets 6569b5a81c7e:c79985706978
updating to branch default
2 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg -R client-B update book-B
0 files updated, 0 files merged, 0 files removed, 0 files unresolved
(activating bookmark book-B)
$ echo b >> client-B/b
$ hg -R client-B commit -m B1

extension to reproduce the race
-------------------------------

If two processes are pushing, we want to make sure the following happens:

* process A reads the changelog
* process B does its full push
* process A reads the bookmarks
* process A proceeds with the rest of its push

We build a server-side extension for this purpose.

$ cat > bookrace.py << EOF
> import os
> import time
> import atexit
> from mercurial import error, extensions, bookmarks
>
> # Delay push A's read of the bookmark file until push B has completed.
> def wrapinit(orig, self, repo):
>     if not os.path.exists('push-A-started'):
>         print('setting raced push up')
>         with open('push-A-started', 'w'):
>             pass
>         clock = 300
>         while not os.path.exists('push-B-done'):
>             clock -= 1
>             if clock <= 0:
>                 raise error.Abort("race scenario timed out")
>             time.sleep(0.1)
>     return orig(self, repo)
>
> def uisetup(ui):
>     extensions.wrapfunction(bookmarks.bmstore, '__init__', wrapinit)
>
> def e():
>     with open('push-A-done', 'w'):
>         pass
> atexit.register(e)
> EOF
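
The extension relies on ``extensions.wrapfunction``, which replaces an
attribute with a wrapper that receives the original callable as its first
argument. A minimal standalone sketch of that pattern (a simplified model,
not Mercurial's actual implementation):

    def wrapfunction_sketch(container, name, wrapper):
        # Replace container.name so every call goes through
        # wrapper(orig, ...), where orig is the original callable.
        orig = getattr(container, name)
        def wrapped(*args, **kwargs):
            return wrapper(orig, *args, **kwargs)
        setattr(container, name, wrapped)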

Actual test
-----------

Start the raced push.

$ cat >> bookrace-server/.hg/hgrc << EOF
> [extensions]
> bookrace=$TESTTMP/bookrace.py
> EOF
$ hg push -R client-A -r book-A >push-output.txt 2>&1 &

Wait up to 30 seconds for that push to start.

$ clock=30
$ while [ ! -f push-A-started ] && [ $clock -gt 0 ] ; do
>   clock=`expr $clock - 1`
>   sleep 1
> done

Do the other push.

$ cat >> bookrace-server/.hg/hgrc << EOF
> [extensions]
> bookrace=!
> EOF

$ hg push -R client-B -r book-B
pushing to ssh://user@dummy/bookrace-server
searching for changes
remote: adding changesets
remote: adding manifests
remote: adding file changes
remote: added 1 changesets with 1 changes to 1 files
updating bookmark book-B

Signal the raced push that we are done (it waits up to 30 seconds).

$ touch push-B-done

Wait for the raced push to finish (with the remainder of the initial 30 seconds).

$ while [ ! -f push-A-done ] && [ $clock -gt 0 ] ; do
>   clock=`expr $clock - 1`
>   sleep 1
> done

Check raced push output.

$ cat push-output.txt
pushing to ssh://user@dummy/bookrace-server
searching for changes
remote: setting raced push up
remote: adding changesets
remote: adding manifests
remote: adding file changes
remote: added 1 changesets with 1 changes to 1 files
updating bookmark book-A

Check result of the push.

$ hg -R bookrace-server log -G
o  changeset:   4:9ce3b28c16de
|  bookmark:    book-A
|  tag:         tip
|  parent:      1:39c28d785860
|  user:        test
|  date:        Thu Jan 01 00:00:00 1970 +0000
|  summary:     A1
|
| o  changeset:   3:f26c3b5167d1
| |  bookmark:    book-B
| |  user:        test
| |  date:        Thu Jan 01 00:00:00 1970 +0000
| |  summary:     B1
| |
| o  changeset:   2:c79985706978
| |  parent:      0:6569b5a81c7e
| |  user:        test
| |  date:        Thu Jan 01 00:00:00 1970 +0000
| |  summary:     B0
| |
o |  changeset:   1:39c28d785860
|/   user:        test
|    date:        Thu Jan 01 00:00:00 1970 +0000
|    summary:     A0
|
o  changeset:   0:6569b5a81c7e
   user:        test
   date:        Thu Jan 01 00:00:00 1970 +0000
   summary:     root

$ hg -R bookrace-server book
   book-A                    4:9ce3b28c16de
   book-B                    3:f26c3b5167d1
@@ -1,3180 +1,3180 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        # proxy to unfiltered __dict__ since filtered repo has no entry
        unfi = repo.unfiltered()
        try:
            return unfi.__dict__[self.sname]
        except KeyError:
            pass
        return super(_basefilecache, self).__get__(unfi, type)

    def set(self, repo, value):
        return super(_basefilecache, self).set(repo.unfiltered(), value)

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

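# Usage sketch (illustrative, not necessarily the exact decoration used
# elsewhere in this file): these decorator classes are applied to
# ``localrepository`` properties so the cached value is invalidated whenever
# the backing file changes, e.g.:
#
#     @repofilecache('bookmarks')
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)
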
class mixedrepostorecache(_basefilecache):
    """filecache for a mix of files in .hg/store and outside"""
    def __init__(self, *pathsandlocations):
        # scmutil.filecache only uses the path for passing back into our
        # join(), so we can safely pass a list of paths and locations
        super(mixedrepostorecache, self).__init__(*pathsandlocations)
        # the entries are already (path, location) tuples, so register
        # them all at once
        _cachedfiles.update(pathsandlocations)

    def join(self, obj, fnameandlocation):
        fname, location = fnameandlocation
        if location == '':
            return obj.vfs.join(fname)
        else:
            if location != 'store':
                raise error.ProgrammingError('unexpected location: %s' %
                                             location)
            return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

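# Usage sketch: probe the cache without forcing a (possibly expensive) load,
# e.g.
#
#     obj, cached = isfilecached(repo, 'changelog')
#     if cached:
#         pass  # reuse obj without re-reading 00changelog.i
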
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper
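
# Usage sketch (hypothetical method): force execution against the unfiltered
# repository even when invoked on a filtered view, e.g.
#
#     @unfilteredmethod
#     def destroyed(self):
#         ...  # sees hidden/obsolete changesets too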

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

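# Usage sketch: an executor is normally obtained from a peer and driven as a
# context manager, e.g.
#
#     with peer.commandexecutor() as e:
#         f = e.callcommand(b'lookup', {b'key': b'tip'})
#         node = f.result()
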
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.1'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
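
# Registration sketch (assumed extension-side code): an extension advertising
# a custom requirement registers a hook like
#
#     def featuresetup(ui, supported):
#         supported.add(b'exp-myextension-feature')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)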

def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    if loadhgrc(ui, wdirvfs, hgvfs, requirements):
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
        extensions.loadall(ui)
        extensions.populateui(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we
    # will not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')
    wcachepath = hgvfs.join(b'wcache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode
    # The cache vfs is used to manage cache files related to the working copy
    wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
    wcachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn()(ui=ui,
                   intents=intents,
                   requirements=requirements,
                   features=features,
                   wdirvfs=wdirvfs,
                   hgvfs=hgvfs,
                   store=store,
                   storevfs=storevfs,
                   storeoptions=storevfs.options,
                   cachevfs=cachevfs,
                   wcachevfs=wcachevfs,
                   extensionmodulenames=extensionmodulenames,
                   extrastate=extrastate,
                   baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        wcachevfs=wcachevfs,
        features=features,
        intents=intents)

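# Caller sketch: higher-level helpers normally drive this function, but a
# direct call would look like (path as bytes, per Mercurial convention):
#
#     repo = makelocalrepository(ui, b'/path/to/repo')
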
def loadhgrc(ui, wdirvfs, hgvfs, requirements):
    """Load hgrc files/content into a ui instance.

    This is called during repository opening to load any additional
    config files or settings relevant to the current repository.

    Returns a bool indicating whether any additional configs were loaded.

    Extensions should monkeypatch this function to modify how per-repo
    configs are loaded. For example, an extension may wish to pull in
    configs from alternate files or sources.
    """
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        return True
    except IOError:
        return False

def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to list of extensions to load automatically when
    # requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.available() and engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)
            if engine.name() == 'zstd':
                supported.add(b'revlog-compression-zstd')

    return supported

def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

728 def resolvestorevfsoptions(ui, requirements, features):
728 def resolvestorevfsoptions(ui, requirements, features):
729 """Resolve the options to pass to the store vfs opener.
729 """Resolve the options to pass to the store vfs opener.
730
730
731 The returned dict is used to influence behavior of the storage layer.
731 The returned dict is used to influence behavior of the storage layer.
732 """
732 """
733 options = {}
733 options = {}
734
734
735 if b'treemanifest' in requirements:
735 if b'treemanifest' in requirements:
736 options[b'treemanifest'] = True
736 options[b'treemanifest'] = True
737
737
738 # experimental config: format.manifestcachesize
738 # experimental config: format.manifestcachesize
739 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
739 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
740 if manifestcachesize is not None:
740 if manifestcachesize is not None:
741 options[b'manifestcachesize'] = manifestcachesize
741 options[b'manifestcachesize'] = manifestcachesize
742
742
743 # In the absence of another requirement superseding a revlog-related
743 # In the absence of another requirement superseding a revlog-related
744 # requirement, we have to assume the repo is using revlog version 0.
744 # requirement, we have to assume the repo is using revlog version 0.
745 # This revlog format is super old and we don't bother trying to parse
745 # This revlog format is super old and we don't bother trying to parse
746 # opener options for it because those options wouldn't do anything
746 # opener options for it because those options wouldn't do anything
747 # meaningful on such old repos.
747 # meaningful on such old repos.
748 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
748 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
749 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
749 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
750
750
751 return options
751 return options

def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}
    options[b'flagprocessors'] = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    lazydelta = ui.configbool(b'storage', b'revlog.reuse-external-delta')
    lazydeltabase = False
    if lazydelta:
        lazydeltabase = ui.configbool(b'storage',
                                      b'revlog.reuse-external-delta-parent')
    if lazydeltabase is None:
        lazydeltabase = not scmutil.gddeltaconfig(ui)
    options[b'lazydelta'] = lazydelta
    options[b'lazydeltabase'] = lazydeltabase

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        # We allow multiple compression engine requirements to co-exist
        # because, strictly speaking, revlogs seem to support mixed
        # compression styles.
        #
        # The compression used for new entries will be "the last one".
        prefix = r.startswith
        if prefix('revlog-compression-') or prefix('exp-compression-'):
            options[b'compengine'] = r.split('-', 2)[2]

    options[b'zlib.level'] = ui.configint(b'storage', b'revlog.zlib.level')
    if options[b'zlib.level'] is not None:
        if not (0 <= options[b'zlib.level'] <= 9):
            msg = _('invalid value for `storage.revlog.zlib.level` config: %d')
            raise error.Abort(msg % options[b'zlib.level'])
    options[b'zstd.level'] = ui.configint(b'storage', b'revlog.zstd.level')
    if options[b'zstd.level'] is not None:
        if not (0 <= options[b'zstd.level'] <= 22):
            msg = _('invalid value for `storage.revlog.zstd.level` config: %d')
            raise error.Abort(msg % options[b'zstd.level'])

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options
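
# A sketch of an hgrc fragment (assumed values) exercising the knobs read
# above; each setting maps onto an entry of the returned ``options`` dict:
#
#   [format]
#   maxchainlen = 1000
#   [storage]
#   revlog.optimize-delta-parent-choice = yes
#   revlog.zstd.level = 3
#   [experimental]
#   sparse-read = yes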

def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self._storenarrowmatch)

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
    features.add(repository.REPO_FEATURE_STREAM_CLONE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance. We capture the
# function as a lambda so we don't hold a reference and the module-level
# functions can be wrapped.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, lambda: makemain),
    (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
]
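
# A sketch of why the lambda indirection matters: an extension can wrap the
# module-level factory and the wrapper takes effect. ``mymakemain`` is a
# hypothetical wrapper name.
#
#   from mercurial import extensions, localrepo
#
#   def mymakemain(orig, **kwargs):
#       cls = orig(**kwargs)  # class produced by the original factory
#       return cls            # or a subclass augmenting behavior
#
#   extensions.wrapfunction(localrepo, 'makemain', mymakemain)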

@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
        bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        wcachevfs
           ``vfs.vfs`` used for cache files related to the working copy.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.wcachevfs = wcachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = branchmap.BranchMapCache()
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # holds sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

        self._extrafilterid = repoview.extrafilter(ui)

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
            if path.startswith('journal.') or path.startswith('undo.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=3, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=3, config='check-locks')
            return ret
        return checkvfs
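
    # A sketch of the hgrc fragment that arms this ward, causing developer
    # warnings for writes performed without the appropriate lock:
    #
    #   [devel]
    #   check-locks = yes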

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=4)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return the unfiltered version of the repository.

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository.

        The `name` parameter is the identifier of the requested view. This
        will return a repoview object set "exactly" to the specified view.

        This function does not apply recursive filtering to a repository. For
        example calling `repo.filtered("served")` will return a repoview using
        the "served" view, regardless of the initial view used by `repo`.

        In other words, there is always only one level of `repoview`
        "filtering".
        """
        if self._extrafilterid is not None and '%' not in name:
            name = name + '%' + self._extrafilterid

        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
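
    # A minimal sketch: obtaining a view that hides filtered changesets;
    # 'served' is one of the standard view names mentioned above.
    #
    #   served = repo.filtered('served')
    #   assert len(served) <= len(repo.unfiltered())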

    # The changelog is listed as a dependency below so that the cached
    # bookmark store is refreshed whenever the changelog changes: bmstore
    # filters out bookmarks pointing to nodes unknown to the changelog.
    @mixedrepostorecache(('bookmarks', ''), ('bookmarks.current', ''),
                         ('bookmarks', 'store'), ('00changelog.i', 'store'))
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on the changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but that
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    @storecache('00manifest.i')
    def manifestlog(self):
        rootstore = manifest.manifestrevlog(self.svfs)
        return manifest.manifestlog(self.svfs, self, rootstore,
                                    self._storenarrowmatch)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @storecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        return narrowspec.load(self)

    @storecache(narrowspec.FILENAME)
    def _storenarrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    @storecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always()
        narrowspec.checkworkingcopynarrowspec(self)
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    def narrowmatch(self, match=None, includeexact=False):
        """matcher corresponding to the repo's narrowspec

        If `match` is given, then that will be intersected with the narrow
        matcher.

        If `includeexact` is True, then any exact matches from `match` will
        be included even if they're outside the narrowspec.
        """
        if match:
            if includeexact and not self._narrowmatch.always():
                # do not exclude explicitly-specified paths so that they can
                # be warned later on
                em = matchmod.exact(match.files())
                nm = matchmod.unionmatcher([self._narrowmatch, em])
                return matchmod.intersectmatchers(match, nm)
            return matchmod.intersectmatchers(match, self._narrowmatch)
        return self._narrowmatch
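
    # A minimal sketch: narrow matchers are callable on repository paths.
    #
    #   m = repo.narrowmatch()
    #   m(b'some/file.txt')  # True when the path is inside the narrowspec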

    def setnarrowpats(self, newincludes, newexcludes):
        narrowspec.save(self, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
            elif changeid == 'null':
                node = nullid
                rev = nullrev
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message
                    raise

            elif len(changeid) == 40:
                node = bin(changeid)
                rev = self.changelog.rev(node)
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

            return context.changectx(self, rev, node)

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except (IndexError, LookupError):
            raise error.RepoLookupError(
                _("unknown revision '%s'") % pycompat.bytestr(changeid))
        except error.WdirUnsupported:
            return context.workingctx(self)
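
    # A sketch of the changeid forms accepted by the mapping protocol above;
    # each line follows one branch of __getitem__:
    #
    #   repo[None]   # working directory context
    #   repo[0]      # revision number
    #   repo['tip']  # symbolic identifier ('null' and '.' work the same way)
    #   repo[node]   # 20-byte binary node or 40-character hex node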

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        tree = revsetlang.spectree(expr, *args)
        return revset.makematcher(tree)(self)

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
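
    # A minimal sketch of the revset helpers above; '%d' is one of the
    # escapes documented in ``revsetlang.formatspec``:
    #
    #   revs = repo.revs('heads() and not closed()')
    #   for ctx in repo.set('%d::', 0):
    #       ctx.description()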
1458
1458
1459 def url(self):
1459 def url(self):
1460 return 'file:' + self.root
1460 return 'file:' + self.root
1461
1461
1462 def hook(self, name, throw=False, **args):
1462 def hook(self, name, throw=False, **args):
1463 """Call a hook, passing this repo instance.
1463 """Call a hook, passing this repo instance.
1464
1464
1465 This a convenience method to aid invoking hooks. Extensions likely
1465 This a convenience method to aid invoking hooks. Extensions likely
1466 won't call this unless they have registered a custom hook or are
1466 won't call this unless they have registered a custom hook or are
1467 replacing code that is expected to call a hook.
1467 replacing code that is expected to call a hook.
1468 """
1468 """
1469 return hook.hook(self.ui, self, name, throw, **args)
1469 return hook.hook(self.ui, self, name, throw, **args)
1470
1470
1471 @filteredpropertycache
1471 @filteredpropertycache
1472 def _tagscache(self):
1472 def _tagscache(self):
1473 '''Returns a tagscache object that contains various tags related
1473 '''Returns a tagscache object that contains various tags related
1474 caches.'''
1474 caches.'''
1475
1475
1476 # This simplifies its cache management by having one decorated
1476 # This simplifies its cache management by having one decorated
1477 # function (this one) and the rest simply fetch things from it.
1477 # function (this one) and the rest simply fetch things from it.
1478 class tagscache(object):
1478 class tagscache(object):
1479 def __init__(self):
1479 def __init__(self):
1480 # These two define the set of tags for this repository. tags
1480 # These two define the set of tags for this repository. tags
1481 # maps tag name to node; tagtypes maps tag name to 'global' or
1481 # maps tag name to node; tagtypes maps tag name to 'global' or
1482 # 'local'. (Global tags are defined by .hgtags across all
1482 # 'local'. (Global tags are defined by .hgtags across all
1483 # heads, and local tags are defined in .hg/localtags.)
1483 # heads, and local tags are defined in .hg/localtags.)
1484 # They constitute the in-memory cache of tags.
1484 # They constitute the in-memory cache of tags.
1485 self.tags = self.tagtypes = None
1485 self.tags = self.tagtypes = None
1486
1486
1487 self.nodetagscache = self.tagslist = None
1487 self.nodetagscache = self.tagslist = None
1488
1488
1489 cache = tagscache()
1489 cache = tagscache()
1490 cache.tags, cache.tagtypes = self._findtags()
1490 cache.tags, cache.tagtypes = self._findtags()
1491
1491
1492 return cache
1492 return cache
1493
1493
1494 def tags(self):
1494 def tags(self):
1495 '''return a mapping of tag to node'''
1495 '''return a mapping of tag to node'''
1496 t = {}
1496 t = {}
1497 if self.changelog.filteredrevs:
1497 if self.changelog.filteredrevs:
1498 tags, tt = self._findtags()
1498 tags, tt = self._findtags()
1499 else:
1499 else:
1500 tags = self._tagscache.tags
1500 tags = self._tagscache.tags
1501 rev = self.changelog.rev
1501 rev = self.changelog.rev
1502 for k, v in tags.iteritems():
1502 for k, v in tags.iteritems():
1503 try:
1503 try:
1504 # ignore tags to unknown nodes
1504 # ignore tags to unknown nodes
1505 rev(v)
1505 rev(v)
1506 t[k] = v
1506 t[k] = v
1507 except (error.LookupError, ValueError):
1507 except (error.LookupError, ValueError):
1508 pass
1508 pass
1509 return t
1509 return t
1510
1510
1511 def _findtags(self):
1511 def _findtags(self):
1512 '''Do the hard work of finding tags. Return a pair of dicts
1512 '''Do the hard work of finding tags. Return a pair of dicts
1513 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1513 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1514 maps tag name to a string like \'global\' or \'local\'.
1514 maps tag name to a string like \'global\' or \'local\'.
1515 Subclasses or extensions are free to add their own tags, but
1515 Subclasses or extensions are free to add their own tags, but
1516 should be aware that the returned dicts will be retained for the
1516 should be aware that the returned dicts will be retained for the
1517 duration of the localrepo object.'''
1517 duration of the localrepo object.'''
1518
1518
1519 # XXX what tagtype should subclasses/extensions use? Currently
1519 # XXX what tagtype should subclasses/extensions use? Currently
1520 # mq and bookmarks add tags, but do not set the tagtype at all.
1520 # mq and bookmarks add tags, but do not set the tagtype at all.
1521 # Should each extension invent its own tag type? Should there
1521 # Should each extension invent its own tag type? Should there
1522 # be one tagtype for all such "virtual" tags? Or is the status
1522 # be one tagtype for all such "virtual" tags? Or is the status
1523 # quo fine?
1523 # quo fine?
1524
1524
1525
1525
1526 # map tag name to (node, hist)
1526 # map tag name to (node, hist)
1527 alltags = tagsmod.findglobaltags(self.ui, self)
1527 alltags = tagsmod.findglobaltags(self.ui, self)
1528 # map tag name to tag type
1528 # map tag name to tag type
1529 tagtypes = dict((tag, 'global') for tag in alltags)
1529 tagtypes = dict((tag, 'global') for tag in alltags)
1530
1530
1531 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1531 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1532
1532
1533 # Build the return dicts. Have to re-encode tag names because
1533 # Build the return dicts. Have to re-encode tag names because
1534 # the tags module always uses UTF-8 (in order not to lose info
1534 # the tags module always uses UTF-8 (in order not to lose info
1535 # writing to the cache), but the rest of Mercurial wants them in
1535 # writing to the cache), but the rest of Mercurial wants them in
1536 # local encoding.
1536 # local encoding.
1537 tags = {}
1537 tags = {}
1538 for (name, (node, hist)) in alltags.iteritems():
1538 for (name, (node, hist)) in alltags.iteritems():
1539 if node != nullid:
1539 if node != nullid:
1540 tags[encoding.tolocal(name)] = node
1540 tags[encoding.tolocal(name)] = node
1541 tags['tip'] = self.changelog.tip()
1541 tags['tip'] = self.changelog.tip()
1542 tagtypes = dict([(encoding.tolocal(name), value)
1542 tagtypes = dict([(encoding.tolocal(name), value)
1543 for (name, value) in tagtypes.iteritems()])
1543 for (name, value) in tagtypes.iteritems()])
1544 return (tags, tagtypes)
1544 return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
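            # e.g. (hypothetical): with tags {'tip': n, 'v1.0': n}, node n
            # maps to ['tip', 'v1.0'] and every other node to [].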
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        return self._branchcaches[self]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        node = scmutil.revsymbol(self, key).node()
        if node is None:
            raise error.RepoLookupError(_("unknown revision '%s'") % key)
        return node

    def lookupbranch(self, key):
        if self.branchmap().hasbranch(key):
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
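        # e.g. (sketch): known([n1, n2]) returns [True, False] when n1 is a
        # known, visible node and n2 is unknown or filtered out.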
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
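        # An hgrc snippet that turns publishing off (illustrative):
        #   [phases]
        #   publish = False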
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid must be a changeset revision, if specified.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
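        # Filters come from matching [encode]/[decode] hgrc sections; a
        # classic illustrative entry (cf. `hg help config`):
        #   [encode]
        #   *.gz = pipe: gunzip
        #   [decode]
        #   *.gz = gzip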
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
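        # The resulting txnid looks like (illustrative):
        #   TXN:1f3a9c0de2b44f6a8c5e7b09d4a1c6e8f02b7d35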
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup application, but that fails
        # to cope with cases where a transaction exposes new heads without a
        # changegroup being involved (e.g. phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tag was touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
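        # A moved tag therefore shows up as a -M/+M pair; a hypothetical
        # tags.changes excerpt (placeholder nodes):
        #   -M 1111111111111111111111111111111111111111 v1.0
        #   +M 2222222222222222222222222222222222222222 v1.0
        #   +A 3333333333333333333333333333333333333333 v1.1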
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here. As we do it only once,
                # building sets would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file with the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                narrowspec.restorewcbackup(self, 'journal.narrowspec.dirstate')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        tr.hookargs['txnname'] = desc
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clones,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.svfs, 'journal.narrowspec'),
                (self.vfs, 'journal.narrowspec.dirstate'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (bookmarks.bookmarksvfs(self), 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savewcbackup(self, 'journal.narrowspec.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        bookmarksvfs.write("journal.bookmarks",
                           bookmarksvfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
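            # undo.desc is the renamed journal.desc written by _writejournal
            # above: "<old repo length>\n<description>\n", optionally followed
            # by a third, more detailed line.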
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        bookmarksvfs = bookmarks.bookmarksvfs(self)
        if bookmarksvfs.exists('undo.bookmarks'):
            bookmarksvfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = any(p not in self.changelog.nodemap for p in parents)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            narrowspec.restorewcbackup(self, 'undo.narrowspec.dirstate')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction has closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # accessing the 'served' branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            self.filtered('served').branchmap()
            self.filtered('served.hidden').branchmap()

        if full:
            unfi = self.unfiltered()
            rbc = unfi.revbranchcache()
            for r in unfi.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

            # accessing fnode cache warms the cache
            tagsmod.fnoderevs(self.ui, unfi, unfi.changelog.revs())
            # accessing tags warms the cache
            self.tags()
            self.filtered('served').tags()

    def invalidatecaches(self):

        if r'_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__[r'_tagscache']

        self._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, r'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), r'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
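        # Typical call pattern (sketch; both locks are context managers):
        #   with repo.wlock(), repo.lock():
        #       with repo.transaction('example') as tr:
        #           ...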
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(vfs=self.svfs,
                       lockname="lock",
                       wait=wait,
                       releasefn=None,
                       acquirefn=self.invalidate,
                       desc=_('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist,
                    includecopymeta):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if ((fparent1 != nullid and
                     manifest1.flags(fname) != fctx.flags()) or
                    (fparent2 != nullid and
                     manifest2.flags(fname) != fctx.flags())):
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        cfname = fctx.copysource()
        if cfname and cfname != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cnode = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or cnode is None: # copied on remote side
                    if cfname in manifest2:
                        cnode = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if cnode:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(cnode)))
                if includecopymeta:
                    meta["copy"] = cfname
                    meta["copyrev"] = hex(cnode)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
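
    # Sketch of the copy metadata recorded above: committing 'bar' that was
    # copied from 'foo' stores roughly
    #
    #     meta = {"copy": "foo", "copyrev": hex(cnode)}
    #
    # in the filelog entry, with fparent1 forced to nullid so readers know to
    # look up the copy data instead of the usual parent.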

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))
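
    # For example, naming an untracked path explicitly ('hg commit newfile'
    # before any 'hg add') reaches fail() above and aborts with a
    # "file not tracked!" message for that path.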

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always()

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        # lock() for recent changelog (see issue4368)
        with self.wlock(), self.lock():
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                uipathfn = scmutil.getuipathfn(self)
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   uipathfn(subrepoutil.subrelpath(sub)))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                with self.transaction('commit'):
                    ret = self.commitctx(cctx, True)
                    # update bookmarks, dirstate and mergestate
                    bookmarks.update(self, [p1, p2], ret)
                    cctx.markcommitted(ret)
                    ms.reset()
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

        def commithook():
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped by the time
            # the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=hex(ret), parent1=hookp1,
                          parent2=hookp2)
        self._afterlock(commithook)
        return ret
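
    # Example usage (sketch): commit() acquires its own locks, so a minimal
    # caller can do
    #
    #     node = repo.commit(text='fix frobnicator', user='alice')
    #
    # and gets back the new changeset node, or None when there was nothing
    # to commit.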

    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        writecopiesto = self.ui.config('experimental', 'copies.write-to')
        writefilecopymeta = writecopiesto != 'changeset-only'
        p1copies, p2copies = None, None
        if writecopiesto in ('changeset-only', 'compatibility'):
            p1copies = ctx.p1copies()
            p2copies = ctx.p2copies()
        with self.lock(), self.transaction("commit") as tr:
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                uipathfn = scmutil.getuipathfn(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(uipathfn(f) + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed,
                                                    writefilecopymeta)
                            m.setflag(f, fctx.flags())
                    except OSError:
                        self.ui.warn(_("trouble committing %s!\n") %
                                     uipathfn(f))
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") %
                                         uipathfn(f))
                        raise

                # update manifest
                removed = [f for f in removed if f in m1 or f in m2]
                drop = sorted([f for f in removed if f in m])
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            if writecopiesto == 'changeset-only':
                # If writing only to changeset extras, use None to indicate that
                # no entry should be written. If writing to both, write an empty
                # entry to prevent the reader from falling back to reading
                # filelogs.
                p1copies = p1copies or None
                p2copies = p2copies or None

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy(),
                                   p1copies, p2copies)
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets.
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            return n
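
    # Note on the control flow above: 'pretxncommit' hooks run inside the
    # still-open transaction, so a hook that raises rolls the transaction
    # back and the new changeset never becomes visible.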

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
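
    # Typical call sequence (sketch): strip first calls destroying(), then
    # removes the revlog data, then calls destroyed() so the caches above
    # are filtered and rebuilt against the shortened history.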

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure to add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)
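
    # Example callback (sketch), in the style of what the fsmonitor extension
    # does to persist its state once status fixups are complete:
    #
    #     def poststatus(wctx, status):
    #         wctx.repo().ui.debug('%d file(s) modified\n'
    #                              % len(status.modified))
    #
    #     repo.addpostdsstatus(poststatus)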

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if not branches.hasbranch(branch):
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
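
    # For example, branchheads('default') yields the open heads of the
    # 'default' branch, newest first; closed=True also includes heads that
    # were closed with 'hg commit --close-branch'.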

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
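
    # The sampling in between() is exponential: for each (top, bottom) pair
    # the returned list holds the nodes 1, 2, 4, 8, ... first-parent steps
    # below top, so the legacy 'between' wire protocol command can narrow
    # down long history ranges with only O(log n) nodes per round trip.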

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance; its registered functions are called
        with a pushop (carrying repo, remote, and outgoing) before changesets
        are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret
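
    # Example (sketch): bookmark pushes travel over pushkey; something like
    #
    #     repo.pushkey('bookmarks', 'book-A', '', hex(newnode))
    #
    # returns True on success and False when a prepushkey hook aborted.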

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename is not such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def defaultcreateopts(ui, createopts=None):
    """Populate the default creation options for a repository.

    A dictionary of explicitly requested creation options can be passed
    in. Missing keys will be populated.
    """
    createopts = dict(createopts or {})

    if 'backend' not in createopts:
        # experimental config: storage.new-repo-backend
        createopts['backend'] = ui.config('storage', 'new-repo-backend')

    return createopts

def newreporequirements(ui, createopts):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    if 'backend' not in createopts:
        raise error.ProgrammingError('backend key not present in createopts; '
                                     'was defaultcreateopts() called?')

    if createopts['backend'] != 'revlogv1':
        raise error.Abort(_('unable to determine repository requirements for '
                            'storage backend: %s') % createopts['backend'])

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('format', 'revlog-compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'format.revlog-compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    elif compengine == 'zstd':
        requirements.add('revlog-compression-zstd')
    elif compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    if createopts.get('lfs'):
        requirements.add('lfs')

    if ui.configbool('format', 'bookmarks-in-store'):
        requirements.add(bookmarks.BOOKMARKS_IN_STORE_REQUIREMENT)

    return requirements
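
# With stock defaults the set above comes out as {'revlogv1', 'generaldelta',
# 'sparserevlog', 'store', 'fncache', 'dotencode'}, which matches the
# .hg/requires content checked by the test further below.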

def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'backend',
        'lfs',
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
        'shallowfilestore',
    }

    return {k: v for k, v in createopts.items() if k not in known}

def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    backend
       The storage backend to use.
    lfs
       Repository will be created with ``lfs`` requirement. The lfs extension
       will automatically be loaded when the repository is accessed.
    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    shallowfilestore
       Indicates that storage for files should be shallow (not all ancestor
       revisions are known).
    """
    createopts = defaultcreateopts(ui, createopts=createopts)

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)
    if 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'cache')
        hgvfs.mkdir(b'wcache')

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)
3156
3156
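scmutil.writerequires() persists one requirement per line in
'.hg/requires'. Every later reader of the repository parses that file
first and refuses to proceed on entries it does not understand, which is
the failure mode the 'exp-compression-unknown' test below exercises. A
rough sketch of that gate, where the hard-coded SUPPORTED set is a
stand-in for whatever features a given build implements; the real check
lives in Mercurial's scmutil and localrepo modules, not here:

    import os

    # Hypothetical feature set; a real client derives this from the
    # requirements it actually supports, not from a literal.
    SUPPORTED = {b'revlogv1', b'generaldelta', b'store', b'fncache',
                 b'dotencode', b'sparserevlog'}

    def readrequires(repopath):
        # One requirement per line, as written by writerequires().
        with open(os.path.join(repopath, '.hg', 'requires'), 'rb') as fh:
            requirements = set(fh.read().splitlines())
        missing = requirements - SUPPORTED
        if missing:
            raise RuntimeError(
                'repository requires features unknown to this Mercurial: '
                '%s' % b', '.join(sorted(missing)).decode('ascii'))
        return requirements
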
3157 def poisonrepository(repo):
3158     """Poison a repository instance so it can no longer be used."""
3159     # Perform any cleanup on the instance.
3160     repo.close()
3161
3162     # Our strategy is to replace the type of the object with one that
3163     # has all attribute lookups result in error.
3164     #
3165     # But we have to allow the close() method because some constructors
3166     # of repos call close() on repo references.
3167     class poisonedrepository(object):
3168         def __getattribute__(self, item):
3169             if item == r'close':
3170                 return object.__getattribute__(self, item)
3171
3172             raise error.ProgrammingError('repo instances should not be used '
3173                                          'after unshare')
3174
3175         def close(self):
3176             pass
3177
3178     # We may have a repoview, which intercepts __setattr__. So be sure
3179     # we operate at the lowest level possible.
3180     object.__setattr__(repo, r'__class__', poisonedrepository)
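The class swap above is not repository-specific; it works on any plain
Python object. A self-contained toy version of the same technique, using
only the standard language (the names here are made up for illustration):

    class _Resource(object):
        def close(self):
            print('closing for real')

    def poison(obj):
        # Replace the instance's class with one whose attribute lookups
        # all raise, except close(), mirroring poisonrepository() above.
        class _Poisoned(object):
            def __getattribute__(self, item):
                if item == 'close':
                    return object.__getattribute__(self, item)
                raise RuntimeError('object used after being poisoned')

            def close(self):
                pass

        object.__setattr__(obj, '__class__', _Poisoned)

    res = _Resource()
    res.close()    # prints 'closing for real'
    poison(res)
    res.close()    # still allowed, now silently does nothing
    res.anything   # raises RuntimeError

Going through object.__setattr__() bypasses the instance's own attribute
machinery, which is what lets the swap work even on a repoview that
intercepts __setattr__.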
@@ -1,195 +1,199 b''
1 A new repository uses zlib storage, which doesn't need a requirement
2
3 $ hg init default
4 $ cd default
5 $ cat .hg/requires
6 dotencode
7 fncache
8 generaldelta
9 revlogv1
10 sparserevlog
11 store
12 testonly-simplestore (reposimplestore !)
13
14 $ touch foo
15 $ hg -q commit -A -m 'initial commit with a lot of repeated repeated repeated text to trigger compression'
16 $ hg debugrevlog -c | grep 0x78
17 0x78 (x) : 1 (100.00%)
18 0x78 (x) : 110 (100.00%)
19
20 $ cd ..
21
22 Unknown compression engine to format.compression aborts
23
24 $ hg --config format.revlog-compression=unknown init unknown
25 abort: compression engine unknown defined by format.revlog-compression not available
26 (run "hg debuginstall" to list available compression engines)
27 [255]
28
29 A requirement specifying an unknown compression engine results in bail
30
31 $ hg init unknownrequirement
32 $ cd unknownrequirement
33 $ echo exp-compression-unknown >> .hg/requires
34 $ hg log
35 abort: repository requires features unknown to this Mercurial: exp-compression-unknown!
36 (see https://mercurial-scm.org/wiki/MissingRequirement for more information)
37 [255]
38
39 $ cd ..
40
41 #if zstd
42
43 $ hg --config format.revlog-compression=zstd init zstd
44 $ cd zstd
45 $ cat .hg/requires
46 dotencode
47 fncache
48 generaldelta
49 revlog-compression-zstd
50 revlogv1
51 sparserevlog
52 store
53 testonly-simplestore (reposimplestore !)
54
55 $ touch foo
56 $ hg -q commit -A -m 'initial commit with a lot of repeated repeated repeated text'
57
58 $ hg debugrevlog -c | grep 0x28
59 0x28 : 1 (100.00%)
60 0x28 : 98 (100.00%)
61
62 $ cd ..
63
64 Specifying a new format.compression on an existing repo won't introduce data
65 with that engine or a requirement
66
67 $ cd default
68 $ touch bar
69 $ hg --config format.revlog-compression=zstd -q commit -A -m 'add bar with a lot of repeated repeated repeated text'
70
71 $ cat .hg/requires
72 dotencode
73 fncache
74 generaldelta
75 revlogv1
76 sparserevlog
77 store
78 testonly-simplestore (reposimplestore !)
79
80 $ hg debugrevlog -c | grep 0x78
81 0x78 (x) : 2 (100.00%)
82 0x78 (x) : 199 (100.00%)
83
84 #endif
85
86 checking zlib options
87 =====================
88
89 $ hg init zlib-level-default
90 $ hg init zlib-level-1
91 $ cat << EOF >> zlib-level-1/.hg/hgrc
92 > [storage]
93 > revlog.zlib.level=1
94 > EOF
95 $ hg init zlib-level-9
96 $ cat << EOF >> zlib-level-9/.hg/hgrc
97 > [storage]
98 > revlog.zlib.level=9
99 > EOF
100
101
102 $ commitone() {
103 > repo=$1
104 > cp $RUNTESTDIR/bundles/issue4438-r1.hg $repo/a
105 > hg -R $repo add $repo/a
106 > hg -R $repo commit -m some-commit
107 > }
108
109 $ for repo in zlib-level-default zlib-level-1 zlib-level-9; do
110 > commitone $repo
111 > done
112
113 $ $RUNTESTDIR/f -s */.hg/store/data/*
114 default/.hg/store/data/foo.i: size=64 (pure !)
115 zlib-level-1/.hg/store/data/a.i: size=4146
116 zlib-level-9/.hg/store/data/a.i: size=4138
117 zlib-level-default/.hg/store/data/a.i: size=4138
118
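The three sizes above differ only because of the configured zlib level:
higher levels trade CPU time for (usually slightly) smaller output. The
effect is easy to reproduce with the standard zlib module alone; the
input below is made up, and 6 is zlib's own default level:

    import zlib

    data = b'a lot of repeated repeated repeated text\n' * 200

    # Compressed size shrinks a little as the level rises.
    for level in (1, 6, 9):
        print(level, len(zlib.compress(data, level)))
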
119 Test error cases
120
121 $ hg init zlib-level-invalid
122 $ cat << EOF >> zlib-level-invalid/.hg/hgrc
123 > [storage]
124 > revlog.zlib.level=foobar
125 > EOF
126 $ commitone zlib-level-invalid
127 abort: storage.revlog.zlib.level is not a valid integer ('foobar')
128 abort: storage.revlog.zlib.level is not a valid integer ('foobar')
129 [255]
130
131 $ hg init zlib-level-out-of-range
132 $ cat << EOF >> zlib-level-out-of-range/.hg/hgrc
133 > [storage]
134 > revlog.zlib.level=42
135 > EOF
136
137 $ commitone zlib-level-out-of-range
138 abort: invalid value for `storage.revlog.zlib.level` config: 42
139 abort: invalid value for `storage.revlog.zlib.level` config: 42
140 [255]
141
142 #if zstd
143
144 checking zstd options
145 =====================
146
147 $ hg init zstd-level-default --config format.revlog-compression=zstd
148 $ hg init zstd-level-1 --config format.revlog-compression=zstd
149 $ cat << EOF >> zstd-level-1/.hg/hgrc
150 > [storage]
151 > revlog.zstd.level=1
152 > EOF
153 $ hg init zstd-level-22 --config format.revlog-compression=zstd
154 $ cat << EOF >> zstd-level-22/.hg/hgrc
155 > [storage]
156 > revlog.zstd.level=22
157 > EOF
158
159
160 $ commitone() {
161 > repo=$1
162 > cp $RUNTESTDIR/bundles/issue4438-r1.hg $repo/a
163 > hg -R $repo add $repo/a
164 > hg -R $repo commit -m some-commit
165 > }
166
167 $ for repo in zstd-level-default zstd-level-1 zstd-level-22; do
168 > commitone $repo
169 > done
170
171 $ $RUNTESTDIR/f -s zstd-*/.hg/store/data/*
172 zstd-level-1/.hg/store/data/a.i: size=4097
173 zstd-level-22/.hg/store/data/a.i: size=4091
174 zstd-level-default/.hg/store/data/a.i: size=4094
175
176 Test error cases
177
178 $ hg init zstd-level-invalid --config format.revlog-compression=zstd
179 $ cat << EOF >> zstd-level-invalid/.hg/hgrc
180 > [storage]
181 > revlog.zstd.level=foobar
182 > EOF
183 $ commitone zstd-level-invalid
184 abort: storage.revlog.zstd.level is not a valid integer ('foobar')
185 abort: storage.revlog.zstd.level is not a valid integer ('foobar')
186 [255]
187
188 $ hg init zstd-level-out-of-range --config format.revlog-compression=zstd
189 $ cat << EOF >> zstd-level-out-of-range/.hg/hgrc
190 > [storage]
191 > revlog.zstd.level=42
192 > EOF
193
194 $ commitone zstd-level-out-of-range
195 abort: invalid value for `storage.revlog.zstd.level` config: 42
196 abort: invalid value for `storage.revlog.zstd.level` config: 42
197 [255]
198
199 #endif
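The zstd levels behave the same way, only over a wider range (1 to 22)
with a different speed/ratio curve. A standalone sketch using the
python-zstandard package that Mercurial bundles, assumed importable as
zstandard here, with made-up input; 3 is zstd's usual default level:

    import zstandard

    data = b'a lot of repeated repeated repeated text\n' * 200

    # Same shape as the zlib experiment: higher level, smaller output.
    for level in (1, 3, 22):
        cctx = zstandard.ZstdCompressor(level=level)
        print(level, len(cctx.compress(data)))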