filelog: custom filelog to be used with narrow repos...
Gregory Szorc
r39801:3e801ffd default
@@ -1,227 +1,278 @@
# filelog.py - file history class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from . import (
    error,
    repository,
    revlog,
)
from .utils import (
    interfaceutil,
)

@interfaceutil.implementer(repository.ifilestorage)
class filelog(object):
    def __init__(self, opener, path):
        self._revlog = revlog.revlog(opener,
                                     '/'.join(('data', path + '.i')),
                                     censorable=True)
        # full name of the user visible file, relative to the repository root
        self.filename = path
        self.index = self._revlog.index
        self.version = self._revlog.version
        self._generaldelta = self._revlog._generaldelta

    def __len__(self):
        return len(self._revlog)

    def __iter__(self):
        return self._revlog.__iter__()

    def revs(self, start=0, stop=None):
        return self._revlog.revs(start=start, stop=stop)

    def parents(self, node):
        return self._revlog.parents(node)

    def parentrevs(self, rev):
        return self._revlog.parentrevs(rev)

    def rev(self, node):
        return self._revlog.rev(node)

    def node(self, rev):
        return self._revlog.node(rev)

    def lookup(self, node):
        return self._revlog.lookup(node)

    def linkrev(self, rev):
        return self._revlog.linkrev(rev)

    def flags(self, rev):
        return self._revlog.flags(rev)

    def commonancestorsheads(self, node1, node2):
        return self._revlog.commonancestorsheads(node1, node2)

    def descendants(self, revs):
        return self._revlog.descendants(revs)

    def headrevs(self):
        return self._revlog.headrevs()

    def heads(self, start=None, stop=None):
        return self._revlog.heads(start, stop)

    def children(self, node):
        return self._revlog.children(node)

    def deltaparent(self, rev):
        return self._revlog.deltaparent(rev)

    def iscensored(self, rev):
        return self._revlog.iscensored(rev)

    def rawsize(self, rev):
        return self._revlog.rawsize(rev)

    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        return self._revlog.checkhash(text, node, p1=p1, p2=p2, rev=rev)

    def revision(self, node, _df=None, raw=False):
        return self._revlog.revision(node, _df=_df, raw=raw)

    def revdiff(self, rev1, rev2):
        return self._revlog.revdiff(rev1, rev2)

    def emitrevisiondeltas(self, requests):
        return self._revlog.emitrevisiondeltas(requests)

    def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
                    node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
                    cachedelta=None):
        return self._revlog.addrevision(revisiondata, transaction, linkrev,
                                        p1, p2, node=node, flags=flags,
                                        cachedelta=cachedelta)

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        return self._revlog.addgroup(deltas, linkmapper, transaction,
                                     addrevisioncb=addrevisioncb)

    def getstrippoint(self, minlink):
        return self._revlog.getstrippoint(minlink)

    def strip(self, minlink, transaction):
        return self._revlog.strip(minlink, transaction)

    def files(self):
        return self._revlog.files()

    def checksize(self):
        return self._revlog.checksize()

    def read(self, node):
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return t
        s = t.index('\1\n', 2)
        return t[s + 2:]

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        if meta or text.startswith('\1\n'):
            text = revlog.packmeta(meta, text)
        return self.addrevision(text, transaction, link, p1, p2)

    def renamed(self, node):
        if self.parents(node)[0] != revlog.nullid:
            return False
        t = self.revision(node)
        m = revlog.parsemeta(t)[0]
        # copy and copyrev occur in pairs. In rare cases due to bugs,
        # one can occur without the other.
        if m and "copy" in m and "copyrev" in m:
            return (m["copy"], revlog.bin(m["copyrev"]))
        return False

    def size(self, rev):
        """return the size of a given revision"""

        # for revisions with renames, we have to go the slow way
        node = self.node(rev)
        if self.renamed(node):
            return len(self.read(node))
        if self.iscensored(rev):
            return 0

        # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
        return self._revlog.size(rev)

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """

        t = text
        if text.startswith('\1\n'):
            t = '\1\n\1\n' + text

        samehashes = not self._revlog.cmp(node, t)
        if samehashes:
            return False

        # censored files compare against the empty file
        if self.iscensored(self.rev(node)):
            return text != ''

        # renaming a file produces a different hash, even if the data
        # remains unchanged. Check if it's the case (slow):
        if self.renamed(node):
            t2 = self.read(node)
            return t2 != text

        return True

    @property
    def filename(self):
        return self._revlog.filename

    @filename.setter
    def filename(self, value):
        self._revlog.filename = value

    # TODO these aren't part of the interface and aren't internal methods.
    # Callers should be fixed to not use them.
    @property
    def indexfile(self):
        return self._revlog.indexfile

    @indexfile.setter
    def indexfile(self, value):
        self._revlog.indexfile = value

    @property
    def datafile(self):
        return self._revlog.datafile

    @property
    def opener(self):
        return self._revlog.opener

    def clone(self, tr, destrevlog, **kwargs):
        if not isinstance(destrevlog, filelog):
            raise error.ProgrammingError('expected filelog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)

    def start(self, rev):
        return self._revlog.start(rev)

    def end(self, rev):
        return self._revlog.end(rev)

    def length(self, rev):
        return self._revlog.length(rev)

    def compress(self, data):
        return self._revlog.compress(data)

    def _addrevision(self, *args, **kwargs):
        return self._revlog._addrevision(*args, **kwargs)

class narrowfilelog(filelog):
    """Filelog variation to be used with narrow stores."""

    def __init__(self, opener, path, narrowmatch):
        super(narrowfilelog, self).__init__(opener, path)
        self._narrowmatch = narrowmatch

    def renamed(self, node):
        res = super(narrowfilelog, self).renamed(node)

        # Renames that come from outside the narrowspec are problematic
        # because we may lack the base text for the rename. This can result
        # in code attempting to walk the ancestry or compute a diff
        # encountering a missing revision. We address this by silently
        # removing rename metadata if the source file is outside the
        # narrow spec.
        #
        # A better solution would be to see if the base revision is available,
        # rather than assuming it isn't.
        #
        # An even better solution would be to teach all consumers of rename
        # metadata that the base revision may not be available.
        #
        # TODO consider better ways of doing this.
        if res and not self._narrowmatch(res[0]):
            return None

        return res

    def size(self, rev):
        # Because we have a custom renamed() that may lie, we need to call
        # the base renamed() to report accurate results.
        node = self.node(rev)
        if super(narrowfilelog, self).renamed(node):
            return len(self.read(node))
        else:
            return super(narrowfilelog, self).size(rev)

    def cmp(self, node, text):
        different = super(narrowfilelog, self).cmp(node, text)

        # Because renamed() may lie, we may get false positives for
        # different content. Check for this by comparing against the original
        # renamed() implementation.
        if different:
            if super(narrowfilelog, self).renamed(node):
                t2 = self.read(node)
                return t2 != text

        return different
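
The subtle point in narrowfilelog above is that renamed() masks copy metadata whose source path falls outside the narrow matcher, while size() and cmp() deliberately call the base class's renamed() so they still account for metadata the override hides. Below is a minimal, self-contained sketch of that filtering pattern; filter_rename, copymeta, and the predicate standing in for a real narrowspec matcher are illustrative names, not Mercurial APIs.

# Sketch only: mirrors the narrowfilelog.renamed() logic with stand-in names.
def filter_rename(copymeta, narrowmatch):
    """Drop (copysource, copyrev) metadata whose source file is outside
    the narrowspec, since its base text may be missing locally."""
    if copymeta and not narrowmatch(copymeta[0]):
        # Report "not renamed" rather than let callers walk the ancestry
        # of the copy source into a missing revision.
        return None
    return copymeta

narrowmatch = lambda path: path.startswith('src/')

# Source inside the narrowspec: metadata is preserved.
assert filter_rename(('src/old.py', b'\x00' * 20), narrowmatch) is not None
# Source outside the narrowspec: metadata is silently dropped.
assert filter_rename(('docs/old.txt', b'\x00' * 20), narrowmatch) is None
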
@@ -1,2834 +1,2847 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store as storemod,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

from .revlogutils import (
    constants as revlogconst,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
    except IOError:
        pass
    else:
        extensions.loadall(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we
    # will not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn(ui=ui,
                 intents=intents,
                 requirements=requirements,
                 wdirvfs=wdirvfs,
                 hgvfs=hgvfs,
                 store=store,
                 storevfs=storevfs,
                 storeoptions=storevfs.options,
                 cachevfs=cachevfs,
                 extensionmodulenames=extensionmodulenames,
                 extrastate=extrastate,
                 baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        intents=intents)

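# ---------------------------------------------------------------------------
# Editor's sketch (not part of localrepo.py): the loop above assembles the
# repository class from per-interface factory results via type(). The toy
# factories below are illustrative stand-ins, not Mercurial's REPO_INTERFACES
# entries, and the _sketch_* names are invented for this example.

def _sketch_makemainmixin(requirements, baseclasses):
    class mainmixin(object):
        def describe(self):
            return 'requires: %s' % ','.join(sorted(requirements))
    return mainmixin

def _sketch_makestoragemixin(requirements, baseclasses):
    class storagemixin(object):
        def storagekind(self):
            return 'revlogv1' if 'revlogv1' in requirements else 'revlogv0'
    return storagemixin

_SKETCH_INTERFACES = [
    ('main', _sketch_makemainmixin),
    ('storage', _sketch_makestoragemixin),
]

def _sketch_makerepo(requirements):
    bases = []
    for iface, fn in _SKETCH_INTERFACES:
        typ = fn(requirements=requirements, baseclasses=bases)
        if not isinstance(typ, type):
            raise TypeError('unable to construct type for %s' % iface)
        bases.append(typ)
    # As in the real code, type() lets the class name carry rich repo info.
    name = 'derivedrepo:<%s>' % ','.join(sorted(requirements))
    return type(name, tuple(bases), {})()

# _sketch_makerepo({'revlogv1', 'store'}) yields an instance whose class is
# named "derivedrepo:<revlogv1,store>" and that mixes in both toy interfaces,
# which is how extensions can splice behavior into the derived repo type.
# ---------------------------------------------------------------------------
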
569 def gathersupportedrequirements(ui):
569 def gathersupportedrequirements(ui):
570 """Determine the complete set of recognized requirements."""
570 """Determine the complete set of recognized requirements."""
571 # Start with all requirements supported by this file.
571 # Start with all requirements supported by this file.
572 supported = set(localrepository._basesupported)
572 supported = set(localrepository._basesupported)
573
573
574 # Execute ``featuresetupfuncs`` entries if they belong to an extension
574 # Execute ``featuresetupfuncs`` entries if they belong to an extension
575 # relevant to this ui instance.
575 # relevant to this ui instance.
576 modules = {m.__name__ for n, m in extensions.extensions(ui)}
576 modules = {m.__name__ for n, m in extensions.extensions(ui)}
577
577
578 for fn in featuresetupfuncs:
578 for fn in featuresetupfuncs:
579 if fn.__module__ in modules:
579 if fn.__module__ in modules:
580 fn(ui, supported)
580 fn(ui, supported)
581
581
582 # Add derived requirements from registered compression engines.
582 # Add derived requirements from registered compression engines.
583 for name in util.compengines:
583 for name in util.compengines:
584 engine = util.compengines[name]
584 engine = util.compengines[name]
585 if engine.revlogheader():
585 if engine.revlogheader():
586 supported.add(b'exp-compression-%s' % name)
586 supported.add(b'exp-compression-%s' % name)
587
587
588 return supported
588 return supported
589
589
590 def ensurerequirementsrecognized(requirements, supported):
590 def ensurerequirementsrecognized(requirements, supported):
591 """Validate that a set of local requirements is recognized.
591 """Validate that a set of local requirements is recognized.
592
592
593 Receives a set of requirements. Raises an ``error.RepoError`` if there
593 Receives a set of requirements. Raises an ``error.RepoError`` if there
594 exists any requirement in that set that currently loaded code doesn't
594 exists any requirement in that set that currently loaded code doesn't
595 recognize.
595 recognize.
596
596
597 Returns a set of supported requirements.
597 Returns a set of supported requirements.
598 """
598 """
599 missing = set()
599 missing = set()
600
600
601 for requirement in requirements:
601 for requirement in requirements:
602 if requirement in supported:
602 if requirement in supported:
603 continue
603 continue
604
604
605 if not requirement or not requirement[0:1].isalnum():
605 if not requirement or not requirement[0:1].isalnum():
606 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
606 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
607
607
608 missing.add(requirement)
608 missing.add(requirement)
609
609
610 if missing:
610 if missing:
611 raise error.RequirementError(
611 raise error.RequirementError(
612 _(b'repository requires features unknown to this Mercurial: %s') %
612 _(b'repository requires features unknown to this Mercurial: %s') %
613 b' '.join(sorted(missing)),
613 b' '.join(sorted(missing)),
614 hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
614 hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
615 b'for more information'))
615 b'for more information'))
616
616
617 def ensurerequirementscompatible(ui, requirements):
617 def ensurerequirementscompatible(ui, requirements):
618 """Validates that a set of recognized requirements is mutually compatible.
618 """Validates that a set of recognized requirements is mutually compatible.
619
619
620 Some requirements may not be compatible with others or require
620 Some requirements may not be compatible with others or require
621 config options that aren't enabled. This function is called during
621 config options that aren't enabled. This function is called during
622 repository opening to ensure that the set of requirements needed
622 repository opening to ensure that the set of requirements needed
623 to open a repository is sane and compatible with config options.
623 to open a repository is sane and compatible with config options.
624
624
625 Extensions can monkeypatch this function to perform additional
625 Extensions can monkeypatch this function to perform additional
626 checking.
626 checking.
627
627
628 ``error.RepoError`` should be raised on failure.
628 ``error.RepoError`` should be raised on failure.
629 """
629 """
630 if b'exp-sparse' in requirements and not sparse.enabled:
630 if b'exp-sparse' in requirements and not sparse.enabled:
631 raise error.RepoError(_(b'repository is using sparse feature but '
631 raise error.RepoError(_(b'repository is using sparse feature but '
632 b'sparse is not enabled; enable the '
632 b'sparse is not enabled; enable the '
633 b'"sparse" extensions to access'))
633 b'"sparse" extensions to access'))
634
634
635 def makestore(requirements, path, vfstype):
635 def makestore(requirements, path, vfstype):
636 """Construct a storage object for a repository."""
636 """Construct a storage object for a repository."""
637 if b'store' in requirements:
637 if b'store' in requirements:
638 if b'fncache' in requirements:
638 if b'fncache' in requirements:
639 return storemod.fncachestore(path, vfstype,
639 return storemod.fncachestore(path, vfstype,
640 b'dotencode' in requirements)
640 b'dotencode' in requirements)
641
641
642 return storemod.encodedstore(path, vfstype)
642 return storemod.encodedstore(path, vfstype)
643
643
644 return storemod.basicstore(path, vfstype)
644 return storemod.basicstore(path, vfstype)
645
645
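The dispatch above collapses to three cases; a runnable sketch that returns labels instead of real store objects:

def pickstore(requirements):
    # Mirrors makestore()'s branching, for illustration only.
    if b'store' in requirements:
        if b'fncache' in requirements:
            return 'fncachestore(dotencode=%s)' % (b'dotencode' in requirements)
        return 'encodedstore'
    return 'basicstore'

assert pickstore({b'store', b'fncache', b'dotencode'}) == 'fncachestore(dotencode=True)'
assert pickstore({b'store'}) == 'encodedstore'
assert pickstore(set()) == 'basicstore'  # pre-"store" layouts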
646 def resolvestorevfsoptions(ui, requirements):
646 def resolvestorevfsoptions(ui, requirements):
647 """Resolve the options to pass to the store vfs opener.
647 """Resolve the options to pass to the store vfs opener.
648
648
649 The returned dict is used to influence behavior of the storage layer.
649 The returned dict is used to influence behavior of the storage layer.
650 """
650 """
651 options = {}
651 options = {}
652
652
653 if b'treemanifest' in requirements:
653 if b'treemanifest' in requirements:
654 options[b'treemanifest'] = True
654 options[b'treemanifest'] = True
655
655
656 # experimental config: format.manifestcachesize
656 # experimental config: format.manifestcachesize
657 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
657 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
658 if manifestcachesize is not None:
658 if manifestcachesize is not None:
659 options[b'manifestcachesize'] = manifestcachesize
659 options[b'manifestcachesize'] = manifestcachesize
660
660
661 # In the absence of another requirement superseding a revlog-related
661 # In the absence of another requirement superseding a revlog-related
662 # requirement, we have to assume the repo is using revlog version 0.
662 # requirement, we have to assume the repo is using revlog version 0.
663 # This revlog format is super old and we don't bother trying to parse
663 # This revlog format is super old and we don't bother trying to parse
664 # opener options for it because those options wouldn't do anything
664 # opener options for it because those options wouldn't do anything
665 # meaningful on such old repos.
665 # meaningful on such old repos.
666 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
666 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
667 options.update(resolverevlogstorevfsoptions(ui, requirements))
667 options.update(resolverevlogstorevfsoptions(ui, requirements))
668
668
669 return options
669 return options
670
670
671 def resolverevlogstorevfsoptions(ui, requirements):
671 def resolverevlogstorevfsoptions(ui, requirements):
672 """Resolve opener options specific to revlogs."""
672 """Resolve opener options specific to revlogs."""
673
673
674 options = {}
674 options = {}
675
675
676 if b'revlogv1' in requirements:
676 if b'revlogv1' in requirements:
677 options[b'revlogv1'] = True
677 options[b'revlogv1'] = True
678 if REVLOGV2_REQUIREMENT in requirements:
678 if REVLOGV2_REQUIREMENT in requirements:
679 options[b'revlogv2'] = True
679 options[b'revlogv2'] = True
680
680
681 if b'generaldelta' in requirements:
681 if b'generaldelta' in requirements:
682 options[b'generaldelta'] = True
682 options[b'generaldelta'] = True
683
683
684 # experimental config: format.chunkcachesize
684 # experimental config: format.chunkcachesize
685 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
685 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
686 if chunkcachesize is not None:
686 if chunkcachesize is not None:
687 options[b'chunkcachesize'] = chunkcachesize
687 options[b'chunkcachesize'] = chunkcachesize
688
688
689 deltabothparents = ui.configbool(b'storage',
689 deltabothparents = ui.configbool(b'storage',
690 b'revlog.optimize-delta-parent-choice')
690 b'revlog.optimize-delta-parent-choice')
691 options[b'deltabothparents'] = deltabothparents
691 options[b'deltabothparents'] = deltabothparents
692
692
693 options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)
693 options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)
694
694
695 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
695 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
696 if 0 <= chainspan:
696 if 0 <= chainspan:
697 options[b'maxdeltachainspan'] = chainspan
697 options[b'maxdeltachainspan'] = chainspan
698
698
699 mmapindexthreshold = ui.configbytes(b'experimental',
699 mmapindexthreshold = ui.configbytes(b'experimental',
700 b'mmapindexthreshold')
700 b'mmapindexthreshold')
701 if mmapindexthreshold is not None:
701 if mmapindexthreshold is not None:
702 options[b'mmapindexthreshold'] = mmapindexthreshold
702 options[b'mmapindexthreshold'] = mmapindexthreshold
703
703
704 withsparseread = ui.configbool(b'experimental', b'sparse-read')
704 withsparseread = ui.configbool(b'experimental', b'sparse-read')
705 srdensitythres = float(ui.config(b'experimental',
705 srdensitythres = float(ui.config(b'experimental',
706 b'sparse-read.density-threshold'))
706 b'sparse-read.density-threshold'))
707 srmingapsize = ui.configbytes(b'experimental',
707 srmingapsize = ui.configbytes(b'experimental',
708 b'sparse-read.min-gap-size')
708 b'sparse-read.min-gap-size')
709 options[b'with-sparse-read'] = withsparseread
709 options[b'with-sparse-read'] = withsparseread
710 options[b'sparse-read-density-threshold'] = srdensitythres
710 options[b'sparse-read-density-threshold'] = srdensitythres
711 options[b'sparse-read-min-gap-size'] = srmingapsize
711 options[b'sparse-read-min-gap-size'] = srmingapsize
712
712
713 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
713 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
714 options[b'sparse-revlog'] = sparserevlog
714 options[b'sparse-revlog'] = sparserevlog
715 if sparserevlog:
715 if sparserevlog:
716 options[b'generaldelta'] = True
716 options[b'generaldelta'] = True
717
717
718 maxchainlen = None
718 maxchainlen = None
719 if sparserevlog:
719 if sparserevlog:
720 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
720 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
721 # experimental config: format.maxchainlen
721 # experimental config: format.maxchainlen
722 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
722 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
723 if maxchainlen is not None:
723 if maxchainlen is not None:
724 options[b'maxchainlen'] = maxchainlen
724 options[b'maxchainlen'] = maxchainlen
725
725
726 for r in requirements:
726 for r in requirements:
727 if r.startswith(b'exp-compression-'):
727 if r.startswith(b'exp-compression-'):
728 options[b'compengine'] = r[len(b'exp-compression-'):]
728 options[b'compengine'] = r[len(b'exp-compression-'):]
729
729
730 return options
730 return options
731
731
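One detail worth calling out: the compression engine is encoded in the requirement string itself. A sketch of the prefix parsing at the end of the function:

requirements = {b'revlogv1', b'exp-compression-zstd'}  # illustrative
compengine = None
for r in requirements:
    if r.startswith(b'exp-compression-'):
        compengine = r[len(b'exp-compression-'):]
assert compengine == b'zstd'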
732 def makemain(**kwargs):
732 def makemain(**kwargs):
733 """Produce a type conforming to ``ilocalrepositorymain``."""
733 """Produce a type conforming to ``ilocalrepositorymain``."""
734 return localrepository
734 return localrepository
735
735
736 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
736 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
737 class revlogfilestorage(object):
737 class revlogfilestorage(object):
738 """File storage when using revlogs."""
738 """File storage when using revlogs."""
739
739
740 def file(self, path):
740 def file(self, path):
741 if path[0] == b'/':
741 if path[0] == b'/':
742 path = path[1:]
742 path = path[1:]
743
743
744 return filelog.filelog(self.svfs, path)
744 return filelog.filelog(self.svfs, path)
745
745
746 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
747 class revlognarrowfilestorage(object):
748 """File storage when using revlogs and narrow files."""
749
750 def file(self, path):
751 if path[0] == b'/':
752 path = path[1:]
753
754 return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())
755
746 def makefilestorage(requirements, **kwargs):
756 def makefilestorage(requirements, **kwargs):
747 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
757 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
748 return revlogfilestorage
758 if repository.NARROW_REQUIREMENT in requirements:
759 return revlognarrowfilestorage
760 else:
761 return revlogfilestorage
749
762
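This is the heart of the change: the narrow requirement selects a different file-storage mixin. A sketch of the dispatch (the requirement string matches repository.NARROW_REQUIREMENT of this era, but treat it as an assumption):

NARROW_REQUIREMENT = b'narrowhg-experimental'  # assumed value, illustrative

def pickfilestorage(requirements):
    if NARROW_REQUIREMENT in requirements:
        return 'revlognarrowfilestorage'  # filelog.narrowfilelog per path
    return 'revlogfilestorage'            # plain filelog.filelog per path

assert pickfilestorage({b'revlogv1'}) == 'revlogfilestorage'
assert pickfilestorage({NARROW_REQUIREMENT}) == 'revlognarrowfilestorage'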
750 # List of repository interfaces and factory functions for them. Each
763 # List of repository interfaces and factory functions for them. Each
751 # will be called in order during ``makelocalrepository()`` to iteratively
764 # will be called in order during ``makelocalrepository()`` to iteratively
752 # derive the final type for a local repository instance.
765 # derive the final type for a local repository instance.
753 REPO_INTERFACES = [
766 REPO_INTERFACES = [
754 (repository.ilocalrepositorymain, makemain),
767 (repository.ilocalrepositorymain, makemain),
755 (repository.ilocalrepositoryfilestorage, makefilestorage),
768 (repository.ilocalrepositoryfilestorage, makefilestorage),
756 ]
769 ]
757
770
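A hedged sketch of how ``makelocalrepository()`` consumes this list; the real code passes more keyword arguments and verifies each base against its interface (simplified here):

# ``requirements`` stands for the set read from .hg/requires.
bases = []
for iface, factory in REPO_INTERFACES:
    # makemain() and makefilestorage() both accept **kwargs, so one
    # shared keyword call covers every factory.
    bases.append(factory(requirements=requirements))

# The final repository type mixes every produced base class together.
cls = type(r'derivedrepo', tuple(bases), {})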
758 @interfaceutil.implementer(repository.ilocalrepositorymain)
771 @interfaceutil.implementer(repository.ilocalrepositorymain)
759 class localrepository(object):
772 class localrepository(object):
760 """Main class for representing local repositories.
773 """Main class for representing local repositories.
761
774
762 All local repositories are instances of this class.
775 All local repositories are instances of this class.
763
776
764 Constructed on its own, instances of this class are not usable as
777 Constructed on its own, instances of this class are not usable as
765 repository objects. To obtain a usable repository object, call
778 repository objects. To obtain a usable repository object, call
766 ``hg.repository()``, ``localrepo.instance()``, or
779 ``hg.repository()``, ``localrepo.instance()``, or
767 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
780 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
768 ``instance()`` adds support for creating new repositories.
781 ``instance()`` adds support for creating new repositories.
769 ``hg.repository()`` adds more extension integration, including calling
782 ``hg.repository()`` adds more extension integration, including calling
770 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
783 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
771 used.
784 used.
772 """
785 """
773
786
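A short usage sketch of the recommended entry point described above:

from mercurial import hg, ui as uimod

# load() builds a ui object from the user's configuration files.
repo = hg.repository(uimod.ui.load(), path=b'/path/to/repo')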
774 # obsolete experimental requirements:
787 # obsolete experimental requirements:
775 # - manifestv2: An experimental new manifest format that allowed
788 # - manifestv2: An experimental new manifest format that allowed
776 # for stem compression of long paths. Experiment ended up not
789 # for stem compression of long paths. Experiment ended up not
777 # being successful (repository sizes went up due to worse delta
790 # being successful (repository sizes went up due to worse delta
778 # chains), and the code was deleted in 4.6.
791 # chains), and the code was deleted in 4.6.
779 supportedformats = {
792 supportedformats = {
780 'revlogv1',
793 'revlogv1',
781 'generaldelta',
794 'generaldelta',
782 'treemanifest',
795 'treemanifest',
783 REVLOGV2_REQUIREMENT,
796 REVLOGV2_REQUIREMENT,
784 SPARSEREVLOG_REQUIREMENT,
797 SPARSEREVLOG_REQUIREMENT,
785 }
798 }
786 _basesupported = supportedformats | {
799 _basesupported = supportedformats | {
787 'store',
800 'store',
788 'fncache',
801 'fncache',
789 'shared',
802 'shared',
790 'relshared',
803 'relshared',
791 'dotencode',
804 'dotencode',
792 'exp-sparse',
805 'exp-sparse',
793 'internal-phase'
806 'internal-phase'
794 }
807 }
795
808
796 # list of prefixes for files which can be written without 'wlock'
809 # list of prefixes for files which can be written without 'wlock'
797 # Extensions should extend this list when needed
810 # Extensions should extend this list when needed
798 _wlockfreeprefix = {
811 _wlockfreeprefix = {
799 # We might consider requiring 'wlock' for the next
812 # We might consider requiring 'wlock' for the next
800 # two, but pretty much all the existing code assumes
813 # two, but pretty much all the existing code assumes
801 # wlock is not needed so we keep them excluded for
814 # wlock is not needed so we keep them excluded for
802 # now.
815 # now.
803 'hgrc',
816 'hgrc',
804 'requires',
817 'requires',
805 # XXX cache is a complicated business; someone
818 # XXX cache is a complicated business; someone
806 # should investigate this in depth at some point
819 # should investigate this in depth at some point
807 'cache/',
820 'cache/',
808 # XXX shouldn't be dirstate covered by the wlock?
821 # XXX shouldn't be dirstate covered by the wlock?
809 'dirstate',
822 'dirstate',
810 # XXX bisect was still a bit too messy at the time
823 # XXX bisect was still a bit too messy at the time
811 # this changeset was introduced. Someone should fix
824 # this changeset was introduced. Someone should fix
812 # the remaining bit and drop this line
825 # the remaining bit and drop this line
813 'bisect.state',
826 'bisect.state',
814 }
827 }
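A sketch of how an extension would extend the list, per the comment above (extension and file prefix are hypothetical):

from mercurial import localrepo

def uisetup(ui):
    # Files matching this prefix may then be written without wlock.
    localrepo.localrepository._wlockfreeprefix.add('myext-state')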
815
828
816 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
829 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
817 supportedrequirements, sharedpath, store, cachevfs,
830 supportedrequirements, sharedpath, store, cachevfs,
818 intents=None):
831 intents=None):
819 """Create a new local repository instance.
832 """Create a new local repository instance.
820
833
821 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
834 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
822 or ``localrepo.makelocalrepository()`` for obtaining a new repository
835 or ``localrepo.makelocalrepository()`` for obtaining a new repository
823 object.
836 object.
824
837
825 Arguments:
838 Arguments:
826
839
827 baseui
840 baseui
828 ``ui.ui`` instance that ``ui`` argument was based off of.
841 ``ui.ui`` instance that ``ui`` argument was based off of.
829
842
830 ui
843 ui
831 ``ui.ui`` instance for use by the repository.
844 ``ui.ui`` instance for use by the repository.
832
845
833 origroot
846 origroot
834 ``bytes`` path to working directory root of this repository.
847 ``bytes`` path to working directory root of this repository.
835
848
836 wdirvfs
849 wdirvfs
837 ``vfs.vfs`` rooted at the working directory.
850 ``vfs.vfs`` rooted at the working directory.
838
851
839 hgvfs
852 hgvfs
840 ``vfs.vfs`` rooted at .hg/
853 ``vfs.vfs`` rooted at .hg/
841
854
842 requirements
855 requirements
843 ``set`` of bytestrings representing repository opening requirements.
856 ``set`` of bytestrings representing repository opening requirements.
844
857
845 supportedrequirements
858 supportedrequirements
846 ``set`` of bytestrings representing repository requirements that we
859 ``set`` of bytestrings representing repository requirements that we
847 know how to open. May be a superset of ``requirements``.
860 know how to open. May be a superset of ``requirements``.
848
861
849 sharedpath
862 sharedpath
850 ``bytes`` defining the path to the storage base directory. Points to a
863 ``bytes`` defining the path to the storage base directory. Points to a
851 ``.hg/`` directory somewhere.
864 ``.hg/`` directory somewhere.
852
865
853 store
866 store
854 ``store.basicstore`` (or derived) instance providing access to
867 ``store.basicstore`` (or derived) instance providing access to
855 versioned storage.
868 versioned storage.
856
869
857 cachevfs
870 cachevfs
858 ``vfs.vfs`` used for cache files.
871 ``vfs.vfs`` used for cache files.
859
872
860 intents
873 intents
861 ``set`` of system strings indicating what this repo will be used
874 ``set`` of system strings indicating what this repo will be used
862 for.
875 for.
863 """
876 """
864 self.baseui = baseui
877 self.baseui = baseui
865 self.ui = ui
878 self.ui = ui
866 self.origroot = origroot
879 self.origroot = origroot
867 # vfs rooted at working directory.
880 # vfs rooted at working directory.
868 self.wvfs = wdirvfs
881 self.wvfs = wdirvfs
869 self.root = wdirvfs.base
882 self.root = wdirvfs.base
870 # vfs rooted at .hg/. Used to access most non-store paths.
883 # vfs rooted at .hg/. Used to access most non-store paths.
871 self.vfs = hgvfs
884 self.vfs = hgvfs
872 self.path = hgvfs.base
885 self.path = hgvfs.base
873 self.requirements = requirements
886 self.requirements = requirements
874 self.supported = supportedrequirements
887 self.supported = supportedrequirements
875 self.sharedpath = sharedpath
888 self.sharedpath = sharedpath
876 self.store = store
889 self.store = store
877 self.cachevfs = cachevfs
890 self.cachevfs = cachevfs
878
891
879 self.filtername = None
892 self.filtername = None
880
893
881 if (self.ui.configbool('devel', 'all-warnings') or
894 if (self.ui.configbool('devel', 'all-warnings') or
882 self.ui.configbool('devel', 'check-locks')):
895 self.ui.configbool('devel', 'check-locks')):
883 self.vfs.audit = self._getvfsward(self.vfs.audit)
896 self.vfs.audit = self._getvfsward(self.vfs.audit)
884 # A list of callbacks to shape the phase if no data were found.
897 # A list of callbacks to shape the phase if no data were found.
885 # Callbacks are in the form: func(repo, roots) --> processed root.
898 # Callbacks are in the form: func(repo, roots) --> processed root.
886 # This list is to be filled by extensions during repo setup
899 # This list is to be filled by extensions during repo setup
887 self._phasedefaults = []
900 self._phasedefaults = []
888
901
889 color.setup(self.ui)
902 color.setup(self.ui)
890
903
891 self.spath = self.store.path
904 self.spath = self.store.path
892 self.svfs = self.store.vfs
905 self.svfs = self.store.vfs
893 self.sjoin = self.store.join
906 self.sjoin = self.store.join
894 if (self.ui.configbool('devel', 'all-warnings') or
907 if (self.ui.configbool('devel', 'all-warnings') or
895 self.ui.configbool('devel', 'check-locks')):
908 self.ui.configbool('devel', 'check-locks')):
896 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
909 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
897 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
910 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
898 else: # standard vfs
911 else: # standard vfs
899 self.svfs.audit = self._getsvfsward(self.svfs.audit)
912 self.svfs.audit = self._getsvfsward(self.svfs.audit)
900
913
901 self._dirstatevalidatewarned = False
914 self._dirstatevalidatewarned = False
902
915
903 self._branchcaches = {}
916 self._branchcaches = {}
904 self._revbranchcache = None
917 self._revbranchcache = None
905 self._filterpats = {}
918 self._filterpats = {}
906 self._datafilters = {}
919 self._datafilters = {}
907 self._transref = self._lockref = self._wlockref = None
920 self._transref = self._lockref = self._wlockref = None
908
921
909 # A cache for various files under .hg/ that tracks file changes,
922 # A cache for various files under .hg/ that tracks file changes,
910 # (used by the filecache decorator)
923 # (used by the filecache decorator)
911 #
924 #
912 # Maps a property name to its util.filecacheentry
925 # Maps a property name to its util.filecacheentry
913 self._filecache = {}
926 self._filecache = {}
914
927
915 # holds sets of revisions to be filtered
928 # holds sets of revisions to be filtered
916 # should be cleared when something might have changed the filter value:
929 # should be cleared when something might have changed the filter value:
917 # - new changesets,
930 # - new changesets,
918 # - phase change,
931 # - phase change,
919 # - new obsolescence marker,
932 # - new obsolescence marker,
920 # - working directory parent change,
933 # - working directory parent change,
921 # - bookmark changes
934 # - bookmark changes
922 self.filteredrevcache = {}
935 self.filteredrevcache = {}
923
936
924 # post-dirstate-status hooks
937 # post-dirstate-status hooks
925 self._postdsstatus = []
938 self._postdsstatus = []
926
939
927 # generic mapping between names and nodes
940 # generic mapping between names and nodes
928 self.names = namespaces.namespaces()
941 self.names = namespaces.namespaces()
929
942
930 # Key to signature value.
943 # Key to signature value.
931 self._sparsesignaturecache = {}
944 self._sparsesignaturecache = {}
932 # Signature to cached matcher instance.
945 # Signature to cached matcher instance.
933 self._sparsematchercache = {}
946 self._sparsematchercache = {}
934
947
935 def _getvfsward(self, origfunc):
948 def _getvfsward(self, origfunc):
936 """build a ward for self.vfs"""
949 """build a ward for self.vfs"""
937 rref = weakref.ref(self)
950 rref = weakref.ref(self)
938 def checkvfs(path, mode=None):
951 def checkvfs(path, mode=None):
939 ret = origfunc(path, mode=mode)
952 ret = origfunc(path, mode=mode)
940 repo = rref()
953 repo = rref()
941 if (repo is None
954 if (repo is None
942 or not util.safehasattr(repo, '_wlockref')
955 or not util.safehasattr(repo, '_wlockref')
943 or not util.safehasattr(repo, '_lockref')):
956 or not util.safehasattr(repo, '_lockref')):
944 return
957 return
945 if mode in (None, 'r', 'rb'):
958 if mode in (None, 'r', 'rb'):
946 return
959 return
947 if path.startswith(repo.path):
960 if path.startswith(repo.path):
948 # truncate name relative to the repository (.hg)
961 # truncate name relative to the repository (.hg)
949 path = path[len(repo.path) + 1:]
962 path = path[len(repo.path) + 1:]
950 if path.startswith('cache/'):
963 if path.startswith('cache/'):
951 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
964 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
952 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
965 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
953 if path.startswith('journal.'):
966 if path.startswith('journal.'):
954 # journal is covered by 'lock'
967 # journal is covered by 'lock'
955 if repo._currentlock(repo._lockref) is None:
968 if repo._currentlock(repo._lockref) is None:
956 repo.ui.develwarn('write with no lock: "%s"' % path,
969 repo.ui.develwarn('write with no lock: "%s"' % path,
957 stacklevel=2, config='check-locks')
970 stacklevel=2, config='check-locks')
958 elif repo._currentlock(repo._wlockref) is None:
971 elif repo._currentlock(repo._wlockref) is None:
959 # rest of vfs files are covered by 'wlock'
972 # rest of vfs files are covered by 'wlock'
960 #
973 #
961 # exclude special files
974 # exclude special files
962 for prefix in self._wlockfreeprefix:
975 for prefix in self._wlockfreeprefix:
963 if path.startswith(prefix):
976 if path.startswith(prefix):
964 return
977 return
965 repo.ui.develwarn('write with no wlock: "%s"' % path,
978 repo.ui.develwarn('write with no wlock: "%s"' % path,
966 stacklevel=2, config='check-locks')
979 stacklevel=2, config='check-locks')
967 return ret
980 return ret
968 return checkvfs
981 return checkvfs
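The ward is a generic wrap-and-observe pattern; a minimal standalone sketch of the same idea (names are illustrative):

def makeward(origfunc, onwrite):
    def ward(path, mode=None):
        ret = origfunc(path, mode=mode)
        if mode not in (None, 'r', 'rb'):  # only writes are interesting
            onwrite(path)
        return ret
    return ward

events = []
audit = makeward(lambda path, mode=None: None, events.append)
audit('bookmarks', mode='wb')
assert events == ['bookmarks']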
969
982
970 def _getsvfsward(self, origfunc):
983 def _getsvfsward(self, origfunc):
971 """build a ward for self.svfs"""
984 """build a ward for self.svfs"""
972 rref = weakref.ref(self)
985 rref = weakref.ref(self)
973 def checksvfs(path, mode=None):
986 def checksvfs(path, mode=None):
974 ret = origfunc(path, mode=mode)
987 ret = origfunc(path, mode=mode)
975 repo = rref()
988 repo = rref()
976 if repo is None or not util.safehasattr(repo, '_lockref'):
989 if repo is None or not util.safehasattr(repo, '_lockref'):
977 return
990 return
978 if mode in (None, 'r', 'rb'):
991 if mode in (None, 'r', 'rb'):
979 return
992 return
980 if path.startswith(repo.sharedpath):
993 if path.startswith(repo.sharedpath):
981 # truncate name relative to the repository (.hg)
994 # truncate name relative to the repository (.hg)
982 path = path[len(repo.sharedpath) + 1:]
995 path = path[len(repo.sharedpath) + 1:]
983 if repo._currentlock(repo._lockref) is None:
996 if repo._currentlock(repo._lockref) is None:
984 repo.ui.develwarn('write with no lock: "%s"' % path,
997 repo.ui.develwarn('write with no lock: "%s"' % path,
985 stacklevel=3)
998 stacklevel=3)
986 return ret
999 return ret
987 return checksvfs
1000 return checksvfs
988
1001
989 def close(self):
1002 def close(self):
990 self._writecaches()
1003 self._writecaches()
991
1004
992 def _writecaches(self):
1005 def _writecaches(self):
993 if self._revbranchcache:
1006 if self._revbranchcache:
994 self._revbranchcache.write()
1007 self._revbranchcache.write()
995
1008
996 def _restrictcapabilities(self, caps):
1009 def _restrictcapabilities(self, caps):
997 if self.ui.configbool('experimental', 'bundle2-advertise'):
1010 if self.ui.configbool('experimental', 'bundle2-advertise'):
998 caps = set(caps)
1011 caps = set(caps)
999 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1012 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1000 role='client'))
1013 role='client'))
1001 caps.add('bundle2=' + urlreq.quote(capsblob))
1014 caps.add('bundle2=' + urlreq.quote(capsblob))
1002 return caps
1015 return caps
1003
1016
1004 def _writerequirements(self):
1017 def _writerequirements(self):
1005 scmutil.writerequires(self.vfs, self.requirements)
1018 scmutil.writerequires(self.vfs, self.requirements)
1006
1019
1007 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1020 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1008 # self -> auditor -> self._checknested -> self
1021 # self -> auditor -> self._checknested -> self
1009
1022
1010 @property
1023 @property
1011 def auditor(self):
1024 def auditor(self):
1012 # This is only used by context.workingctx.match in order to
1025 # This is only used by context.workingctx.match in order to
1013 # detect files in subrepos.
1026 # detect files in subrepos.
1014 return pathutil.pathauditor(self.root, callback=self._checknested)
1027 return pathutil.pathauditor(self.root, callback=self._checknested)
1015
1028
1016 @property
1029 @property
1017 def nofsauditor(self):
1030 def nofsauditor(self):
1018 # This is only used by context.basectx.match in order to detect
1031 # This is only used by context.basectx.match in order to detect
1019 # files in subrepos.
1032 # files in subrepos.
1020 return pathutil.pathauditor(self.root, callback=self._checknested,
1033 return pathutil.pathauditor(self.root, callback=self._checknested,
1021 realfs=False, cached=True)
1034 realfs=False, cached=True)
1022
1035
1023 def _checknested(self, path):
1036 def _checknested(self, path):
1024 """Determine if path is a legal nested repository."""
1037 """Determine if path is a legal nested repository."""
1025 if not path.startswith(self.root):
1038 if not path.startswith(self.root):
1026 return False
1039 return False
1027 subpath = path[len(self.root) + 1:]
1040 subpath = path[len(self.root) + 1:]
1028 normsubpath = util.pconvert(subpath)
1041 normsubpath = util.pconvert(subpath)
1029
1042
1030 # XXX: Checking against the current working copy is wrong in
1043 # XXX: Checking against the current working copy is wrong in
1031 # the sense that it can reject things like
1044 # the sense that it can reject things like
1032 #
1045 #
1033 # $ hg cat -r 10 sub/x.txt
1046 # $ hg cat -r 10 sub/x.txt
1034 #
1047 #
1035 # if sub/ is no longer a subrepository in the working copy
1048 # if sub/ is no longer a subrepository in the working copy
1036 # parent revision.
1049 # parent revision.
1037 #
1050 #
1038 # However, it can of course also allow things that would have
1051 # However, it can of course also allow things that would have
1039 # been rejected before, such as the above cat command if sub/
1052 # been rejected before, such as the above cat command if sub/
1040 # is a subrepository now, but was a normal directory before.
1053 # is a subrepository now, but was a normal directory before.
1041 # The old path auditor would have rejected by mistake since it
1054 # The old path auditor would have rejected by mistake since it
1042 # panics when it sees sub/.hg/.
1055 # panics when it sees sub/.hg/.
1043 #
1056 #
1044 # All in all, checking against the working copy seems sensible
1057 # All in all, checking against the working copy seems sensible
1045 # since we want to prevent access to nested repositories on
1058 # since we want to prevent access to nested repositories on
1046 # the filesystem *now*.
1059 # the filesystem *now*.
1047 ctx = self[None]
1060 ctx = self[None]
1048 parts = util.splitpath(subpath)
1061 parts = util.splitpath(subpath)
1049 while parts:
1062 while parts:
1050 prefix = '/'.join(parts)
1063 prefix = '/'.join(parts)
1051 if prefix in ctx.substate:
1064 if prefix in ctx.substate:
1052 if prefix == normsubpath:
1065 if prefix == normsubpath:
1053 return True
1066 return True
1054 else:
1067 else:
1055 sub = ctx.sub(prefix)
1068 sub = ctx.sub(prefix)
1056 return sub.checknested(subpath[len(prefix) + 1:])
1069 return sub.checknested(subpath[len(prefix) + 1:])
1057 else:
1070 else:
1058 parts.pop()
1071 parts.pop()
1059 return False
1072 return False
1060
1073
1061 def peer(self):
1074 def peer(self):
1062 return localpeer(self) # not cached to avoid reference cycle
1075 return localpeer(self) # not cached to avoid reference cycle
1063
1076
1064 def unfiltered(self):
1077 def unfiltered(self):
1065 """Return unfiltered version of the repository
1078 """Return unfiltered version of the repository
1066
1079
1067 Intended to be overwritten by filtered repo."""
1080 Intended to be overwritten by filtered repo."""
1068 return self
1081 return self
1069
1082
1070 def filtered(self, name, visibilityexceptions=None):
1083 def filtered(self, name, visibilityexceptions=None):
1071 """Return a filtered version of a repository"""
1084 """Return a filtered version of a repository"""
1072 cls = repoview.newtype(self.unfiltered().__class__)
1085 cls = repoview.newtype(self.unfiltered().__class__)
1073 return cls(self, name, visibilityexceptions)
1086 return cls(self, name, visibilityexceptions)
1074
1087
1075 @repofilecache('bookmarks', 'bookmarks.current')
1088 @repofilecache('bookmarks', 'bookmarks.current')
1076 def _bookmarks(self):
1089 def _bookmarks(self):
1077 return bookmarks.bmstore(self)
1090 return bookmarks.bmstore(self)
1078
1091
1079 @property
1092 @property
1080 def _activebookmark(self):
1093 def _activebookmark(self):
1081 return self._bookmarks.active
1094 return self._bookmarks.active
1082
1095
1083 # _phasesets depend on changelog. What we need is to call
1096 # _phasesets depend on changelog. What we need is to call
1084 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1097 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1085 # can't be easily expressed in filecache mechanism.
1098 # can't be easily expressed in filecache mechanism.
1086 @storecache('phaseroots', '00changelog.i')
1099 @storecache('phaseroots', '00changelog.i')
1087 def _phasecache(self):
1100 def _phasecache(self):
1088 return phases.phasecache(self, self._phasedefaults)
1101 return phases.phasecache(self, self._phasedefaults)
1089
1102
1090 @storecache('obsstore')
1103 @storecache('obsstore')
1091 def obsstore(self):
1104 def obsstore(self):
1092 return obsolete.makestore(self.ui, self)
1105 return obsolete.makestore(self.ui, self)
1093
1106
1094 @storecache('00changelog.i')
1107 @storecache('00changelog.i')
1095 def changelog(self):
1108 def changelog(self):
1096 return changelog.changelog(self.svfs,
1109 return changelog.changelog(self.svfs,
1097 trypending=txnutil.mayhavepending(self.root))
1110 trypending=txnutil.mayhavepending(self.root))
1098
1111
1099 @storecache('00manifest.i')
1112 @storecache('00manifest.i')
1100 def manifestlog(self):
1113 def manifestlog(self):
1101 rootstore = manifest.manifestrevlog(self.svfs)
1114 rootstore = manifest.manifestrevlog(self.svfs)
1102 return manifest.manifestlog(self.svfs, self, rootstore)
1115 return manifest.manifestlog(self.svfs, self, rootstore)
1103
1116
1104 @repofilecache('dirstate')
1117 @repofilecache('dirstate')
1105 def dirstate(self):
1118 def dirstate(self):
1106 return self._makedirstate()
1119 return self._makedirstate()
1107
1120
1108 def _makedirstate(self):
1121 def _makedirstate(self):
1109 """Extension point for wrapping the dirstate per-repo."""
1122 """Extension point for wrapping the dirstate per-repo."""
1110 sparsematchfn = lambda: sparse.matcher(self)
1123 sparsematchfn = lambda: sparse.matcher(self)
1111
1124
1112 return dirstate.dirstate(self.vfs, self.ui, self.root,
1125 return dirstate.dirstate(self.vfs, self.ui, self.root,
1113 self._dirstatevalidate, sparsematchfn)
1126 self._dirstatevalidate, sparsematchfn)
1114
1127
1115 def _dirstatevalidate(self, node):
1128 def _dirstatevalidate(self, node):
1116 try:
1129 try:
1117 self.changelog.rev(node)
1130 self.changelog.rev(node)
1118 return node
1131 return node
1119 except error.LookupError:
1132 except error.LookupError:
1120 if not self._dirstatevalidatewarned:
1133 if not self._dirstatevalidatewarned:
1121 self._dirstatevalidatewarned = True
1134 self._dirstatevalidatewarned = True
1122 self.ui.warn(_("warning: ignoring unknown"
1135 self.ui.warn(_("warning: ignoring unknown"
1123 " working parent %s!\n") % short(node))
1136 " working parent %s!\n") % short(node))
1124 return nullid
1137 return nullid
1125
1138
1126 @storecache(narrowspec.FILENAME)
1139 @storecache(narrowspec.FILENAME)
1127 def narrowpats(self):
1140 def narrowpats(self):
1128 """matcher patterns for this repository's narrowspec
1141 """matcher patterns for this repository's narrowspec
1129
1142
1130 A tuple of (includes, excludes).
1143 A tuple of (includes, excludes).
1131 """
1144 """
1132 return narrowspec.load(self)
1145 return narrowspec.load(self)
1133
1146
1134 @storecache(narrowspec.FILENAME)
1147 @storecache(narrowspec.FILENAME)
1135 def _narrowmatch(self):
1148 def _narrowmatch(self):
1136 if repository.NARROW_REQUIREMENT not in self.requirements:
1149 if repository.NARROW_REQUIREMENT not in self.requirements:
1137 return matchmod.always(self.root, '')
1150 return matchmod.always(self.root, '')
1138 include, exclude = self.narrowpats
1151 include, exclude = self.narrowpats
1139 return narrowspec.match(self.root, include=include, exclude=exclude)
1152 return narrowspec.match(self.root, include=include, exclude=exclude)
1140
1153
1141 # TODO(martinvonz): make this property-like instead?
1154 # TODO(martinvonz): make this property-like instead?
1142 def narrowmatch(self):
1155 def narrowmatch(self):
1143 return self._narrowmatch
1156 return self._narrowmatch
1144
1157
1145 def setnarrowpats(self, newincludes, newexcludes):
1158 def setnarrowpats(self, newincludes, newexcludes):
1146 narrowspec.save(self, newincludes, newexcludes)
1159 narrowspec.save(self, newincludes, newexcludes)
1147 self.invalidate(clearfilecache=True)
1160 self.invalidate(clearfilecache=True)
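Usage sketch: on a non-narrow repository the matcher matches everything, so callers can short-circuit (``repo`` is assumed to be an open repository):

m = repo.narrowmatch()
if m.always():
    pass  # every tracked path is inside the (absent) narrow spec
else:
    tracked = [f for f in (b'src/a.c', b'docs/x.txt') if m(f)]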
1148
1161
1149 def __getitem__(self, changeid):
1162 def __getitem__(self, changeid):
1150 if changeid is None:
1163 if changeid is None:
1151 return context.workingctx(self)
1164 return context.workingctx(self)
1152 if isinstance(changeid, context.basectx):
1165 if isinstance(changeid, context.basectx):
1153 return changeid
1166 return changeid
1154 if isinstance(changeid, slice):
1167 if isinstance(changeid, slice):
1155 # wdirrev isn't contiguous so the slice shouldn't include it
1168 # wdirrev isn't contiguous so the slice shouldn't include it
1156 return [context.changectx(self, i)
1169 return [context.changectx(self, i)
1157 for i in pycompat.xrange(*changeid.indices(len(self)))
1170 for i in pycompat.xrange(*changeid.indices(len(self)))
1158 if i not in self.changelog.filteredrevs]
1171 if i not in self.changelog.filteredrevs]
1159 try:
1172 try:
1160 return context.changectx(self, changeid)
1173 return context.changectx(self, changeid)
1161 except error.WdirUnsupported:
1174 except error.WdirUnsupported:
1162 return context.workingctx(self)
1175 return context.workingctx(self)
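Usage sketch for the lookup forms handled above (``repo`` assumed open):

wctx = repo[None]    # working directory context
ctx = repo[0]        # changectx by revision number
some = repo[0:5]     # list of changectx, skipping filtered revisions
same = repo[ctx]     # a basectx passes straight through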
1163
1176
1164 def __contains__(self, changeid):
1177 def __contains__(self, changeid):
1165 """True if the given changeid exists
1178 """True if the given changeid exists
1166
1179
1167 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1180 error.AmbiguousPrefixLookupError is raised if an ambiguous node
1168 is specified.
1181 is specified.
1169 """
1182 """
1170 try:
1183 try:
1171 self[changeid]
1184 self[changeid]
1172 return True
1185 return True
1173 except error.RepoLookupError:
1186 except error.RepoLookupError:
1174 return False
1187 return False
1175
1188
1176 def __nonzero__(self):
1189 def __nonzero__(self):
1177 return True
1190 return True
1178
1191
1179 __bool__ = __nonzero__
1192 __bool__ = __nonzero__
1180
1193
1181 def __len__(self):
1194 def __len__(self):
1182 # no need to pay the cost of repoview.changelog
1195 # no need to pay the cost of repoview.changelog
1183 unfi = self.unfiltered()
1196 unfi = self.unfiltered()
1184 return len(unfi.changelog)
1197 return len(unfi.changelog)
1185
1198
1186 def __iter__(self):
1199 def __iter__(self):
1187 return iter(self.changelog)
1200 return iter(self.changelog)
1188
1201
1189 def revs(self, expr, *args):
1202 def revs(self, expr, *args):
1190 '''Find revisions matching a revset.
1203 '''Find revisions matching a revset.
1191
1204
1192 The revset is specified as a string ``expr`` that may contain
1205 The revset is specified as a string ``expr`` that may contain
1193 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1206 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1194
1207
1195 Revset aliases from the configuration are not expanded. To expand
1208 Revset aliases from the configuration are not expanded. To expand
1196 user aliases, consider calling ``scmutil.revrange()`` or
1209 user aliases, consider calling ``scmutil.revrange()`` or
1197 ``repo.anyrevs([expr], user=True)``.
1210 ``repo.anyrevs([expr], user=True)``.
1198
1211
1199 Returns a revset.abstractsmartset, which is a list-like interface
1212 Returns a revset.abstractsmartset, which is a list-like interface
1200 that contains integer revisions.
1213 that contains integer revisions.
1201 '''
1214 '''
1202 expr = revsetlang.formatspec(expr, *args)
1215 expr = revsetlang.formatspec(expr, *args)
1203 m = revset.match(None, expr)
1216 m = revset.match(None, expr)
1204 return m(self)
1217 return m(self)
1205
1218
1206 def set(self, expr, *args):
1219 def set(self, expr, *args):
1207 '''Find revisions matching a revset and emit changectx instances.
1220 '''Find revisions matching a revset and emit changectx instances.
1208
1221
1209 This is a convenience wrapper around ``revs()`` that iterates the
1222 This is a convenience wrapper around ``revs()`` that iterates the
1210 result and is a generator of changectx instances.
1223 result and is a generator of changectx instances.
1211
1224
1212 Revset aliases from the configuration are not expanded. To expand
1225 Revset aliases from the configuration are not expanded. To expand
1213 user aliases, consider calling ``scmutil.revrange()``.
1226 user aliases, consider calling ``scmutil.revrange()``.
1214 '''
1227 '''
1215 for r in self.revs(expr, *args):
1228 for r in self.revs(expr, *args):
1216 yield self[r]
1229 yield self[r]
1217
1230
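Usage sketch for ``revs()``/``set()`` with the %-formatting mentioned in the docstrings:

for rev in repo.revs(b'ancestors(%d) and not public()', 42):
    pass  # rev is an integer revision

for ctx in repo.set(b'heads(branch(%s))', b'default'):
    pass  # ctx is a changectx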
1218 def anyrevs(self, specs, user=False, localalias=None):
1231 def anyrevs(self, specs, user=False, localalias=None):
1219 '''Find revisions matching one of the given revsets.
1232 '''Find revisions matching one of the given revsets.
1220
1233
1221 Revset aliases from the configuration are not expanded by default. To
1234 Revset aliases from the configuration are not expanded by default. To
1222 expand user aliases, specify ``user=True``. To provide some local
1235 expand user aliases, specify ``user=True``. To provide some local
1223 definitions overriding user aliases, set ``localalias`` to
1236 definitions overriding user aliases, set ``localalias`` to
1224 ``{name: definitionstring}``.
1237 ``{name: definitionstring}``.
1225 '''
1238 '''
1226 if user:
1239 if user:
1227 m = revset.matchany(self.ui, specs,
1240 m = revset.matchany(self.ui, specs,
1228 lookup=revset.lookupfn(self),
1241 lookup=revset.lookupfn(self),
1229 localalias=localalias)
1242 localalias=localalias)
1230 else:
1243 else:
1231 m = revset.matchany(None, specs, localalias=localalias)
1244 m = revset.matchany(None, specs, localalias=localalias)
1232 return m(self)
1245 return m(self)
1233
1246
1234 def url(self):
1247 def url(self):
1235 return 'file:' + self.root
1248 return 'file:' + self.root
1236
1249
1237 def hook(self, name, throw=False, **args):
1250 def hook(self, name, throw=False, **args):
1238 """Call a hook, passing this repo instance.
1251 """Call a hook, passing this repo instance.
1239
1252
1240 This is a convenience method to aid invoking hooks. Extensions likely
1253 This is a convenience method to aid invoking hooks. Extensions likely
1241 won't call this unless they have registered a custom hook or are
1254 won't call this unless they have registered a custom hook or are
1242 replacing code that is expected to call a hook.
1255 replacing code that is expected to call a hook.
1243 """
1256 """
1244 return hook.hook(self.ui, self, name, throw, **args)
1257 return hook.hook(self.ui, self, name, throw, **args)
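Usage sketch: an extension firing a custom hook through this helper (the hook name is hypothetical):

# Runs any configured [hooks] entries named 'myext-sync' and aborts the
# operation if one fails, because throw=True.
repo.hook('myext-sync', throw=True, source='myext')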
1245
1258
1246 @filteredpropertycache
1259 @filteredpropertycache
1247 def _tagscache(self):
1260 def _tagscache(self):
1248 '''Returns a tagscache object that contains various tags related
1261 '''Returns a tagscache object that contains various tags related
1249 caches.'''
1262 caches.'''
1250
1263
1251 # This simplifies its cache management by having one decorated
1264 # This simplifies its cache management by having one decorated
1252 # function (this one) and the rest simply fetch things from it.
1265 # function (this one) and the rest simply fetch things from it.
1253 class tagscache(object):
1266 class tagscache(object):
1254 def __init__(self):
1267 def __init__(self):
1255 # These two define the set of tags for this repository. tags
1268 # These two define the set of tags for this repository. tags
1256 # maps tag name to node; tagtypes maps tag name to 'global' or
1269 # maps tag name to node; tagtypes maps tag name to 'global' or
1257 # 'local'. (Global tags are defined by .hgtags across all
1270 # 'local'. (Global tags are defined by .hgtags across all
1258 # heads, and local tags are defined in .hg/localtags.)
1271 # heads, and local tags are defined in .hg/localtags.)
1259 # They constitute the in-memory cache of tags.
1272 # They constitute the in-memory cache of tags.
1260 self.tags = self.tagtypes = None
1273 self.tags = self.tagtypes = None
1261
1274
1262 self.nodetagscache = self.tagslist = None
1275 self.nodetagscache = self.tagslist = None
1263
1276
1264 cache = tagscache()
1277 cache = tagscache()
1265 cache.tags, cache.tagtypes = self._findtags()
1278 cache.tags, cache.tagtypes = self._findtags()
1266
1279
1267 return cache
1280 return cache
1268
1281
1269 def tags(self):
1282 def tags(self):
1270 '''return a mapping of tag to node'''
1283 '''return a mapping of tag to node'''
1271 t = {}
1284 t = {}
1272 if self.changelog.filteredrevs:
1285 if self.changelog.filteredrevs:
1273 tags, tt = self._findtags()
1286 tags, tt = self._findtags()
1274 else:
1287 else:
1275 tags = self._tagscache.tags
1288 tags = self._tagscache.tags
1276 for k, v in tags.iteritems():
1289 for k, v in tags.iteritems():
1277 try:
1290 try:
1278 # ignore tags to unknown nodes
1291 # ignore tags to unknown nodes
1279 self.changelog.rev(v)
1292 self.changelog.rev(v)
1280 t[k] = v
1293 t[k] = v
1281 except (error.LookupError, ValueError):
1294 except (error.LookupError, ValueError):
1282 pass
1295 pass
1283 return t
1296 return t
1284
1297
1285 def _findtags(self):
1298 def _findtags(self):
1286 '''Do the hard work of finding tags. Return a pair of dicts
1299 '''Do the hard work of finding tags. Return a pair of dicts
1287 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1300 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1288 maps tag name to a string like \'global\' or \'local\'.
1301 maps tag name to a string like \'global\' or \'local\'.
1289 Subclasses or extensions are free to add their own tags, but
1302 Subclasses or extensions are free to add their own tags, but
1290 should be aware that the returned dicts will be retained for the
1303 should be aware that the returned dicts will be retained for the
1291 duration of the localrepo object.'''
1304 duration of the localrepo object.'''
1292
1305
1293 # XXX what tagtype should subclasses/extensions use? Currently
1306 # XXX what tagtype should subclasses/extensions use? Currently
1294 # mq and bookmarks add tags, but do not set the tagtype at all.
1307 # mq and bookmarks add tags, but do not set the tagtype at all.
1295 # Should each extension invent its own tag type? Should there
1308 # Should each extension invent its own tag type? Should there
1296 # be one tagtype for all such "virtual" tags? Or is the status
1309 # be one tagtype for all such "virtual" tags? Or is the status
1297 # quo fine?
1310 # quo fine?
1298
1311
1299
1312
1300 # map tag name to (node, hist)
1313 # map tag name to (node, hist)
1301 alltags = tagsmod.findglobaltags(self.ui, self)
1314 alltags = tagsmod.findglobaltags(self.ui, self)
1302 # map tag name to tag type
1315 # map tag name to tag type
1303 tagtypes = dict((tag, 'global') for tag in alltags)
1316 tagtypes = dict((tag, 'global') for tag in alltags)
1304
1317
1305 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1318 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1306
1319
1307 # Build the return dicts. Have to re-encode tag names because
1320 # Build the return dicts. Have to re-encode tag names because
1308 # the tags module always uses UTF-8 (in order not to lose info
1321 # the tags module always uses UTF-8 (in order not to lose info
1309 # writing to the cache), but the rest of Mercurial wants them in
1322 # writing to the cache), but the rest of Mercurial wants them in
1310 # local encoding.
1323 # local encoding.
1311 tags = {}
1324 tags = {}
1312 for (name, (node, hist)) in alltags.iteritems():
1325 for (name, (node, hist)) in alltags.iteritems():
1313 if node != nullid:
1326 if node != nullid:
1314 tags[encoding.tolocal(name)] = node
1327 tags[encoding.tolocal(name)] = node
1315 tags['tip'] = self.changelog.tip()
1328 tags['tip'] = self.changelog.tip()
1316 tagtypes = dict([(encoding.tolocal(name), value)
1329 tagtypes = dict([(encoding.tolocal(name), value)
1317 for (name, value) in tagtypes.iteritems()])
1330 for (name, value) in tagtypes.iteritems()])
1318 return (tags, tagtypes)
1331 return (tags, tagtypes)
1319
1332
1320 def tagtype(self, tagname):
1333 def tagtype(self, tagname):
1321 '''
1334 '''
1322 return the type of the given tag. result can be:
1335 return the type of the given tag. result can be:
1323
1336
1324 'local' : a local tag
1337 'local' : a local tag
1325 'global' : a global tag
1338 'global' : a global tag
1326 None : tag does not exist
1339 None : tag does not exist
1327 '''
1340 '''
1328
1341
1329 return self._tagscache.tagtypes.get(tagname)
1342 return self._tagscache.tagtypes.get(tagname)
1330
1343
1331 def tagslist(self):
1344 def tagslist(self):
1332 '''return a list of tags ordered by revision'''
1345 '''return a list of tags ordered by revision'''
1333 if not self._tagscache.tagslist:
1346 if not self._tagscache.tagslist:
1334 l = []
1347 l = []
1335 for t, n in self.tags().iteritems():
1348 for t, n in self.tags().iteritems():
1336 l.append((self.changelog.rev(n), t, n))
1349 l.append((self.changelog.rev(n), t, n))
1337 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1350 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1338
1351
1339 return self._tagscache.tagslist
1352 return self._tagscache.tagslist
1340
1353
1341 def nodetags(self, node):
1354 def nodetags(self, node):
1342 '''return the tags associated with a node'''
1355 '''return the tags associated with a node'''
1343 if not self._tagscache.nodetagscache:
1356 if not self._tagscache.nodetagscache:
1344 nodetagscache = {}
1357 nodetagscache = {}
1345 for t, n in self._tagscache.tags.iteritems():
1358 for t, n in self._tagscache.tags.iteritems():
1346 nodetagscache.setdefault(n, []).append(t)
1359 nodetagscache.setdefault(n, []).append(t)
1347 for tags in nodetagscache.itervalues():
1360 for tags in nodetagscache.itervalues():
1348 tags.sort()
1361 tags.sort()
1349 self._tagscache.nodetagscache = nodetagscache
1362 self._tagscache.nodetagscache = nodetagscache
1350 return self._tagscache.nodetagscache.get(node, [])
1363 return self._tagscache.nodetagscache.get(node, [])
1351
1364
1352 def nodebookmarks(self, node):
1365 def nodebookmarks(self, node):
1353 """return the list of bookmarks pointing to the specified node"""
1366 """return the list of bookmarks pointing to the specified node"""
1354 return self._bookmarks.names(node)
1367 return self._bookmarks.names(node)
1355
1368
1356 def branchmap(self):
1369 def branchmap(self):
1357 '''returns a dictionary {branch: [branchheads]} with branchheads
1370 '''returns a dictionary {branch: [branchheads]} with branchheads
1358 ordered by increasing revision number'''
1371 ordered by increasing revision number'''
1359 branchmap.updatecache(self)
1372 branchmap.updatecache(self)
1360 return self._branchcaches[self.filtername]
1373 return self._branchcaches[self.filtername]
1361
1374
1362 @unfilteredmethod
1375 @unfilteredmethod
1363 def revbranchcache(self):
1376 def revbranchcache(self):
1364 if not self._revbranchcache:
1377 if not self._revbranchcache:
1365 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1378 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1366 return self._revbranchcache
1379 return self._revbranchcache
1367
1380
1368 def branchtip(self, branch, ignoremissing=False):
1381 def branchtip(self, branch, ignoremissing=False):
1369 '''return the tip node for a given branch
1382 '''return the tip node for a given branch
1370
1383
1371 If ignoremissing is True, then this method will not raise an error.
1384 If ignoremissing is True, then this method will not raise an error.
1372 This is helpful for callers that only expect None for a missing branch
1385 This is helpful for callers that only expect None for a missing branch
1373 (e.g. namespace).
1386 (e.g. namespace).
1374
1387
1375 '''
1388 '''
1376 try:
1389 try:
1377 return self.branchmap().branchtip(branch)
1390 return self.branchmap().branchtip(branch)
1378 except KeyError:
1391 except KeyError:
1379 if not ignoremissing:
1392 if not ignoremissing:
1380 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1393 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1381 else:
1394 else:
1382 pass
1395 pass
1383
1396
1384 def lookup(self, key):
1397 def lookup(self, key):
1385 return scmutil.revsymbol(self, key).node()
1398 return scmutil.revsymbol(self, key).node()
1386
1399
1387 def lookupbranch(self, key):
1400 def lookupbranch(self, key):
1388 if key in self.branchmap():
1401 if key in self.branchmap():
1389 return key
1402 return key
1390
1403
1391 return scmutil.revsymbol(self, key).branch()
1404 return scmutil.revsymbol(self, key).branch()
1392
1405
1393 def known(self, nodes):
1406 def known(self, nodes):
1394 cl = self.changelog
1407 cl = self.changelog
1395 nm = cl.nodemap
1408 nm = cl.nodemap
1396 filtered = cl.filteredrevs
1409 filtered = cl.filteredrevs
1397 result = []
1410 result = []
1398 for n in nodes:
1411 for n in nodes:
1399 r = nm.get(n)
1412 r = nm.get(n)
1400 resp = not (r is None or r in filtered)
1413 resp = not (r is None or r in filtered)
1401 result.append(resp)
1414 result.append(resp)
1402 return result
1415 return result
1403
1416
1404 def local(self):
1417 def local(self):
1405 return self
1418 return self
1406
1419
1407 def publishing(self):
1420 def publishing(self):
1408 # it's safe (and desirable) to trust the publish flag unconditionally
1421 # it's safe (and desirable) to trust the publish flag unconditionally
1409 # so that we don't finalize changes shared between users via ssh or nfs
1422 # so that we don't finalize changes shared between users via ssh or nfs
1410 return self.ui.configbool('phases', 'publish', untrusted=True)
1423 return self.ui.configbool('phases', 'publish', untrusted=True)
1411
1424
1412 def cancopy(self):
1425 def cancopy(self):
1413 # so statichttprepo's override of local() works
1426 # so statichttprepo's override of local() works
1414 if not self.local():
1427 if not self.local():
1415 return False
1428 return False
1416 if not self.publishing():
1429 if not self.publishing():
1417 return True
1430 return True
1418 # if publishing we can't copy if there is filtered content
1431 # if publishing we can't copy if there is filtered content
1419 return not self.filtered('visible').changelog.filteredrevs
1432 return not self.filtered('visible').changelog.filteredrevs
1420
1433
1421 def shared(self):
1434 def shared(self):
1422 '''the type of shared repository (None if not shared)'''
1435 '''the type of shared repository (None if not shared)'''
1423 if self.sharedpath != self.path:
1436 if self.sharedpath != self.path:
1424 return 'store'
1437 return 'store'
1425 return None
1438 return None
1426
1439
1427 def wjoin(self, f, *insidef):
1440 def wjoin(self, f, *insidef):
1428 return self.vfs.reljoin(self.root, f, *insidef)
1441 return self.vfs.reljoin(self.root, f, *insidef)
1429
1442
1430 def setparents(self, p1, p2=nullid):
1443 def setparents(self, p1, p2=nullid):
1431 with self.dirstate.parentchange():
1444 with self.dirstate.parentchange():
1432 copies = self.dirstate.setparents(p1, p2)
1445 copies = self.dirstate.setparents(p1, p2)
1433 pctx = self[p1]
1446 pctx = self[p1]
1434 if copies:
1447 if copies:
1435 # Adjust copy records; the dirstate cannot do it, as it
1448 # Adjust copy records; the dirstate cannot do it, as it
1436 # requires access to the parents' manifests. Preserve them
1449 # requires access to the parents' manifests. Preserve them
1437 # only for entries added to first parent.
1450 # only for entries added to first parent.
1438 for f in copies:
1451 for f in copies:
1439 if f not in pctx and copies[f] in pctx:
1452 if f not in pctx and copies[f] in pctx:
1440 self.dirstate.copy(copies[f], f)
1453 self.dirstate.copy(copies[f], f)
1441 if p2 == nullid:
1454 if p2 == nullid:
1442 for f, s in sorted(self.dirstate.copies().items()):
1455 for f, s in sorted(self.dirstate.copies().items()):
1443 if f not in pctx and s not in pctx:
1456 if f not in pctx and s not in pctx:
1444 self.dirstate.copy(None, f)
1457 self.dirstate.copy(None, f)
1445
1458
1446 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1459 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1447 """changeid can be a changeset revision, node, or tag.
1460 """changeid can be a changeset revision, node, or tag.
1448 fileid can be a file revision or node."""
1461 fileid can be a file revision or node."""
1449 return context.filectx(self, path, changeid, fileid,
1462 return context.filectx(self, path, changeid, fileid,
1450 changectx=changectx)
1463 changectx=changectx)
1451
1464
1452 def getcwd(self):
1465 def getcwd(self):
1453 return self.dirstate.getcwd()
1466 return self.dirstate.getcwd()
1454
1467
1455 def pathto(self, f, cwd=None):
1468 def pathto(self, f, cwd=None):
1456 return self.dirstate.pathto(f, cwd)
1469 return self.dirstate.pathto(f, cwd)
1457
1470
    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

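    # How these filters are configured (an illustrative sketch, adapted from
    # Mercurial's configuration help; the gzip rules below are an example,
    # not a recommendation): _loadfilter() reads the [encode] and [decode]
    # hgrc sections, and a pattern mapped to '!' is skipped entirely::
    #
    #   [encode]
    #   # uncompress gzip files on checkin to improve delta compression
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   # recompress gzip files when writing them to the working directory
    #   *.gz = pipe: gzip
    #
    # A command whose prefix matches a name registered through
    # adddatafilter() is instead dispatched to that in-process Python filter
    # function, with the remainder of the command as its parameters.
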
    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

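    # Usage sketch (hypothetical calls, for illustration only): wread()
    # applies the 'encode' filters to data leaving the working directory,
    # and wwrite() applies the 'decode' filters before writing it back,
    # honoring the manifest flags::
    #
    #   data = repo.wread('foo.txt')             # filtered for storage
    #   repo.wwrite('foo.txt', data, '')         # plain file
    #   repo.wwrite('build.sh', data, 'x')       # executable bit set
    #   repo.wwrite('link', 'target', 'l')       # symlink to 'target'
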
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (e.g. phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new, changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
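        # For instance (an illustrative sketch; the nodes below are made up),
        # a transaction that moved tag "v1.2" and added tag "v1.3" would
        # leave in ``.hg/changes/tags.changes``::
        #
        #   -M 1f0dee641bb7258c56bd60e93edfa2405381c41e v1.2
        #   +M 38021581d9b67d46cd55e92a677a5a2a71e3e889 v1.2
        #   +A a31c0f12e4b9d1ba1aedbf6e615b224e0e2e84c0 v1.3
        #
        # and an external txnclose hook would check the HG_TAG_MOVED
        # environment variable before reading that file.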
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # note: we compare lists here. As we do this only once,
                # building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # This must be invoked explicitly here, because in-memory
                # changes aren't written out when the transaction closes
                # if tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running.
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when hooks run. As the fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if the transaction is successful; will schedule a
            hook run.
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if the transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

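    # Usage sketch (assuming a ``repo`` object; not part of this class): a
    # transaction must be opened while holding the store lock, closed on
    # success, and released unconditionally::
    #
    #   with repo.lock():
    #       tr = repo.transaction('my-operation')
    #       try:
    #           ...          # write store data through tr
    #           tr.close()
    #       finally:
    #           tr.release()
    #
    # Calling transaction() again while one is running returns a nested
    # transaction via tr.nest() rather than opening a second journal.
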
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

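    # For example, opening a "commit" transaction in a repository that
    # currently has 42 revisions writes the two lines "42\ncommit\n" to
    # journal.desc; _rollback() below parses the same format back out of
    # undo.desc once the journal has been renamed.
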
    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

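    # The weak reference above avoids a repo -> transaction -> updater ->
    # repo cycle. A minimal illustration of the pattern (a standalone
    # sketch, not part of this class)::
    #
    #   import weakref
    #   class Thing(object):
    #       pass
    #   t = Thing()
    #   ref = weakref.ref(t)
    #   assert ref() is t       # target still alive
    #   del t
    #   assert ref() is None    # callbacks see None after collection
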
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid, but a
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

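    # Usage sketch (hypothetical): code that must only run once the
    # repository is fully unlocked registers a plain callable::
    #
    #   def notify():
    #       repo.ui.status('locks released\n')
    #   repo._afterlock(notify)
    #
    # If neither lock is currently held, the for/else clause above runs
    # the callback immediately instead of queueing it.
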
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such an
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

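    # Lock-ordering sketch: per the docstrings above, 'wlock' must come
    # before 'lock'. The canonical pattern is::
    #
    #   with repo.wlock():      # working directory first
    #       with repo.lock():   # then the store
    #           ...             # mutate store and dirstate
    #
    # Acquiring them in the opposite order risks deadlock against a process
    # following the documented order, which is exactly what the
    # devel.check-locks warning above is meant to catch.
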
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (e.g. issue4476). Instead, we
            # will warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

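    # Illustration: for a file committed as a rename of ``foo``, the
    # metadata stored with the new filelog revision looks like::
    #
    #   meta = {"copy": "foo",
    #           "copyrev": "1f0dee641bb7258c56bd60e93edfa2405381c41e"}
    #
    # (the copyrev value is an arbitrary 40-hex node for illustration),
    # with the first parent set to nullid so readers know to consult the
    # copy source, as the comments in _filecommit() describe.
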
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may already have been stripped by the
            # time the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

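    # Usage sketch (illustrative; the file and user names are assumptions,
    # and matcher constructor signatures vary across Mercurial versions):
    #
    #   match = matchmod.exact(repo.root, '', ['foo.txt'])
    #   node = repo.commit(text='fix foo', user='alice', match=match)
    #
    # With no matcher, everything reported modified/added/removed by
    # status() goes into the new changeset.
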
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter parent
                # changesets; if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase is 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

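    # In short, commitctx() resolves the manifest node one of three ways:
    # reuse ctx.manifestnode() when the context already carries one, write a
    # new manifest revision when file contents actually changed, or fall
    # back to p1's manifest when nothing changed at all.
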
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

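    # Callback sketch (illustrative): an extension could register a
    # post-dirstate-status hook before each status call, e.g.:
    #
    #   def fixup(wctx, status):
    #       wctx.repo().ui.debug('status fixups ran\n')
    #   repo.addpostdsstatus(fixup)
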
    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

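    # For each (top, bottom) pair, between() walks the first-parent chain
    # from top toward bottom and samples nodes at exponentially growing
    # distances (1, 2, 4, 8, ...). The legacy discovery protocol uses these
    # sparse samples to bisect toward a common ancestor in O(log n) rounds.
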
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of hooks that are passed a pushop
        (carrying repo, remote, and outgoing attributes) and are called
        before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

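    # Illustrative example: bookmarks travel over this pushkey interface.
    # Roughly, repo.pushkey('bookmarks', 'mybook', '', hex(newnode)) creates
    # or moves a bookmark, firing the prepushkey hook before the change and
    # the pushkey hook once the lock is released (exact key encodings are a
    # detail of the bookmarks namespace, so treat this as a sketch).
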
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

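# For example, undoname('journal.dirstate') yields 'undo.dirstate'. After a
# transaction closes successfully, the aftertrans() closure above renames
# each journal.* backup to the corresponding undo.* file, which is what
# makes a later 'hg rollback' possible.
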
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def newreporequirements(ui, createopts=None):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    createopts = createopts or {}

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    return requirements

def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {'narrowfiles'}

    return {k: v for k, v in createopts.items() if k not in known}

def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.
    """
    createopts = createopts or {}

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements:
        hgvfs.mkdir(b'store')

    # We create an invalid changelog outside the store so very old
    # Mercurial versions (which didn't know about the requirements
    # file) encounter an error on reading the changelog. This
    # effectively locks out old clients and prevents them from
    # mucking with a repo in an unknown format.
    #
    # The revlog header has version 2, which won't be recognized by
    # such old clients.
    hgvfs.append(b'00changelog.i',
                 b'\0\0\0\2 dummy changelog to prevent using the old repo '
                 b'layout')

    scmutil.writerequires(hgvfs, requirements)

def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # has all attribute lookups result in error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)