# Mercurial changeset r40134:68282a7b (default branch), by Gregory Szorc
# "filelog: remove version attribute (API)"
#
# Reconstructed from a scraped diff view. First hunk below:
# mercurial/filelog.py  @@ -1,277 +1,275 @@
1 # filelog.py - file history class for mercurial
1 # filelog.py - file history class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from . import (
10 from . import (
11 error,
11 error,
12 repository,
12 repository,
13 revlog,
13 revlog,
14 )
14 )
15 from .utils import (
15 from .utils import (
16 interfaceutil,
16 interfaceutil,
17 )
17 )
18
18
@interfaceutil.implementer(repository.ifilestorage)
class filelog(object):
    """Storage for a single tracked file, implemented as a thin wrapper
    around a ``revlog``.

    Nearly every method delegates directly to ``self._revlog``; the
    value-added logic is confined to copy/rename metadata handling
    (``read``, ``add``, ``renamed``) and the rename/censor-aware
    ``size`` and ``cmp``.
    """

    def __init__(self, opener, path):
        # The revlog is stored under data/<path>.i; censorable because
        # file data (unlike changelog data) may be censored.
        self._revlog = revlog.revlog(opener,
                                     '/'.join(('data', path + '.i')),
                                     censorable=True)
        # Full name of the user visible file, relative to the repository root.
        # Used by LFS. (Assigns through the ``filename`` property below,
        # so the value actually lives on the revlog.)
        self.filename = path
        # Used by repo upgrade.
        self.index = self._revlog.index
        # Used by changegroup generation.
        self._generaldelta = self._revlog._generaldelta

    def __len__(self):
        return len(self._revlog)

    def __iter__(self):
        return self._revlog.__iter__()

    def revs(self, start=0, stop=None):
        return self._revlog.revs(start=start, stop=stop)

    def parents(self, node):
        return self._revlog.parents(node)

    def parentrevs(self, rev):
        return self._revlog.parentrevs(rev)

    def rev(self, node):
        return self._revlog.rev(node)

    def node(self, rev):
        return self._revlog.node(rev)

    def lookup(self, node):
        return self._revlog.lookup(node)

    def linkrev(self, rev):
        return self._revlog.linkrev(rev)

    # Used by LFS, verify.
    def flags(self, rev):
        return self._revlog.flags(rev)

    def commonancestorsheads(self, node1, node2):
        return self._revlog.commonancestorsheads(node1, node2)

    # Used by dagop.blockdescendants().
    def descendants(self, revs):
        return self._revlog.descendants(revs)

    def heads(self, start=None, stop=None):
        return self._revlog.heads(start, stop)

    # Used by hgweb, children extension.
    def children(self, node):
        return self._revlog.children(node)

    def deltaparent(self, rev):
        return self._revlog.deltaparent(rev)

    def iscensored(self, rev):
        return self._revlog.iscensored(rev)

    # Used by verify.
    def rawsize(self, rev):
        return self._revlog.rawsize(rev)

    # Might be unused.
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        return self._revlog.checkhash(text, node, p1=p1, p2=p2, rev=rev)

    def revision(self, node, _df=None, raw=False):
        return self._revlog.revision(node, _df=_df, raw=raw)

    def revdiff(self, rev1, rev2):
        return self._revlog.revdiff(rev1, rev2)

    def emitrevisiondeltas(self, requests):
        return self._revlog.emitrevisiondeltas(requests)

    def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
                    node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
                    cachedelta=None):
        return self._revlog.addrevision(revisiondata, transaction, linkrev,
                                        p1, p2, node=node, flags=flags,
                                        cachedelta=cachedelta)

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        return self._revlog.addgroup(deltas, linkmapper, transaction,
                                     addrevisioncb=addrevisioncb)

    def getstrippoint(self, minlink):
        return self._revlog.getstrippoint(minlink)

    def strip(self, minlink, transaction):
        return self._revlog.strip(minlink, transaction)

    def censorrevision(self, tr, node, tombstone=b''):
        # NOTE(review): ``tr`` is accepted for interface compatibility but
        # not forwarded to the revlog.
        return self._revlog.censorrevision(node, tombstone=tombstone)

    def files(self):
        return self._revlog.files()

    # Used by verify.
    def checksize(self):
        return self._revlog.checksize()

    def read(self, node):
        """Return revision text with any copy metadata header stripped.

        Copy metadata, when present, is framed between two '\\1\\n'
        markers at the start of the stored text.
        """
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return t
        s = t.index('\1\n', 2)
        return t[s + 2:]

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        """Store ``text``, prepending a metadata header when needed.

        The header is also added when the text itself begins with
        '\\1\\n', so that ``read()`` can unambiguously strip it again.
        """
        if meta or text.startswith('\1\n'):
            text = revlog.packmeta(meta, text)
        return self.addrevision(text, transaction, link, p1, p2)

    def renamed(self, node):
        """Return (copysource, copynode) if ``node`` is a copy/rename,
        else False.

        Only revisions whose first parent is null can carry rename
        metadata, so anything else short-circuits to False.
        """
        if self.parents(node)[0] != revlog.nullid:
            return False
        t = self.revision(node)
        m = revlog.parsemeta(t)[0]
        # copy and copyrev occur in pairs. In rare cases due to bugs,
        # one can occur without the other.
        if m and "copy" in m and "copyrev" in m:
            return (m["copy"], revlog.bin(m["copyrev"]))
        return False

    def size(self, rev):
        """return the size of a given revision"""

        # for revisions with renames, we have to go the slow way
        node = self.node(rev)
        if self.renamed(node):
            return len(self.read(node))
        if self.iscensored(rev):
            return 0

        # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
        return self._revlog.size(rev)

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """

        t = text
        if text.startswith('\1\n'):
            t = '\1\n\1\n' + text

        samehashes = not self._revlog.cmp(node, t)
        if samehashes:
            return False

        # censored files compare against the empty file
        if self.iscensored(self.rev(node)):
            return text != ''

        # renaming a file produces a different hash, even if the data
        # remains unchanged. Check if it's the case (slow):
        if self.renamed(node):
            t2 = self.read(node)
            return t2 != text

        return True

    def verifyintegrity(self, state):
        return self._revlog.verifyintegrity(state)

    # TODO these aren't part of the interface and aren't internal methods.
    # Callers should be fixed to not use them.

    # Used by LFS.
    @property
    def filename(self):
        return self._revlog.filename

    @filename.setter
    def filename(self, value):
        self._revlog.filename = value

    # Used by bundlefilelog, unionfilelog.
    @property
    def indexfile(self):
        return self._revlog.indexfile

    @indexfile.setter
    def indexfile(self, value):
        self._revlog.indexfile = value

    # Used by LFS, repo upgrade.
    @property
    def opener(self):
        return self._revlog.opener

    # Used by repo upgrade.
    def clone(self, tr, destrevlog, **kwargs):
        if not isinstance(destrevlog, filelog):
            raise error.ProgrammingError('expected filelog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
227
225
class narrowfilelog(filelog):
    """Filelog variation to be used with narrow stores.

    Overrides ``renamed()`` to hide rename metadata whose source file
    lies outside the narrow spec, and compensates for that lie in
    ``size()`` and ``cmp()`` by consulting the base implementation.
    """

    def __init__(self, opener, path, narrowmatch):
        super(narrowfilelog, self).__init__(opener, path)
        # Matcher deciding which paths are inside the narrow spec.
        self._narrowmatch = narrowmatch

    def renamed(self, node):
        res = super(narrowfilelog, self).renamed(node)

        # Renames that come from outside the narrowspec are problematic
        # because we may lack the base text for the rename. This can result
        # in code attempting to walk the ancestry or compute a diff
        # encountering a missing revision. We address this by silently
        # removing rename metadata if the source file is outside the
        # narrow spec.
        #
        # A better solution would be to see if the base revision is available,
        # rather than assuming it isn't.
        #
        # An even better solution would be to teach all consumers of rename
        # metadata that the base revision may not be available.
        #
        # TODO consider better ways of doing this.
        if res and not self._narrowmatch(res[0]):
            return None

        return res

    def size(self, rev):
        # Because we have a custom renamed() that may lie, we need to call
        # the base renamed() to report accurate results.
        node = self.node(rev)
        if super(narrowfilelog, self).renamed(node):
            return len(self.read(node))
        else:
            return super(narrowfilelog, self).size(rev)

    def cmp(self, node, text):
        different = super(narrowfilelog, self).cmp(node, text)

        # Because renamed() may lie, we may get false positives for
        # different content. Check for this by comparing against the original
        # renamed() implementation.
        if different:
            if super(narrowfilelog, self).renamed(node):
                t2 = self.read(node)
                return t2 != text

        return different
# Second hunk: mercurial/repository.py  @@ -1,1619 +1,1613 @@
1 # repository.py - Interfaces and base classes for repositories and peers.
1 # repository.py - Interfaces and base classes for repositories and peers.
2 #
2 #
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from . import (
11 from . import (
12 error,
12 error,
13 )
13 )
14 from .utils import (
14 from .utils import (
15 interfaceutil,
15 interfaceutil,
16 )
16 )
17
17
# When narrowing is finalized and no longer subject to format changes,
# we should move this to just "narrow" or similar.
NARROW_REQUIREMENT = 'narrowhg-experimental'
21
21
class ipeerconnection(interfaceutil.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """
    ui = interfaceutil.Attribute("""ui.ui instance""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def peer():
        """Returns an object conforming to this interface.

        Most implementations will ``return self``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """
67
67
class ipeercapabilities(interfaceutil.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """
87
87
class ipeercommands(interfaceutil.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    methods commonly call wire protocol commands of the same name.
    """

    def branchmap():
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities():
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def clonebundles():
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating of the corresponding node
        at that index is known to the peer.
        """

    def listkeys(namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on and
        the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out():
        """Obtain streaming clone data.

        Successful result should be a generator of data chunks.
        """

    def unbundle(bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """
176
176
class ipeerlegacycommands(interfaceutil.Interface):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no longer
    used by modern clients. To facilitate identifying which commands are
    legacy, the interfaces are split.
    """

    def between(pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(nodes):
        """Obtain ancestor changesets of specific nodes back to a branch point.

        For each requested node, the peer finds the first ancestor node that is
        a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each node.
        """

    def changegroup(nodes, source):
        """Obtain a changegroup with data for descendants of specified nodes."""

    def changegroupsubset(bases, heads, source):
        pass
208
208
class ipeercommandexecutor(interfaceutil.Interface):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command can not coexist with other commands in this executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    def sendcommands():
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    def close():
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting the
        context manager.

        This method may call ``sendcommands()`` if there are buffered commands.
        """
265
265
class ipeerrequests(interfaceutil.Interface):
    """Interface for executing commands on a peer."""

    def commandexecutor():
        """A context manager that resolves to an ipeercommandexecutor.

        The object this resolves to can be used to issue command requests
        to the peer.

        Callers should call its ``callcommand`` method to issue command
        requests.

        A new executor should be obtained for each distinct set of commands
        (possibly just a single command) that the consumer wants to execute
        as part of a single operation or round trip. This is because some
        peers are half-duplex and/or don't support persistent connections.
        e.g. in the case of HTTP peers, commands sent to an executor represent
        a single HTTP request. While some peers may support multiple command
        sends over the wire per executor, consumers need to code to the least
        capable peer. So it should be assumed that command executors buffer
        called commands until they are told to send them and that each
        command executor could result in a new connection or wire-level request
        being issued.
        """
290
290
class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified interface for peer repositories.

    All peer instances must conform to this interface.
    """
296
296
@interfaceutil.implementer(ipeerbase)
class peer(object):
    """Base class for peer repositories."""

    def capable(self, name):
        """Query support for a named capability.

        Returns ``True`` if the capability is present as a bare name,
        the string value after ``name=`` if the capability carries a
        value, or ``False`` if the capability is absent.
        """
        caps = self.capabilities()
        if name in caps:
            return True

        # Value-carrying capabilities are advertised as ``name=value``.
        name = '%s=' % name
        for cap in caps:
            if cap.startswith(name):
                return cap[len(name):]

        return False

    def requirecap(self, name, purpose):
        """Require a capability to be present, for a stated purpose.

        No-ops if the peer has the capability; otherwise raises
        ``error.CapabilityError`` with a message incorporating ``purpose``.
        """
        if self.capable(name):
            return

        raise error.CapabilityError(
            _('cannot %s; remote repository does not support the %r '
              'capability') % (purpose, name))
320
320
class iverifyproblem(interfaceutil.Interface):
    """Represents a problem with the integrity of the repository.

    Instances of this interface are emitted to describe an integrity issue
    with a repository (e.g. corrupt storage, missing data, etc).

    Instances are essentially messages associated with severity.
    """
    warning = interfaceutil.Attribute(
        """Message indicating a non-fatal problem.""")

    error = interfaceutil.Attribute(
        """Message indicating a fatal problem.""")
334
334
class irevisiondelta(interfaceutil.Interface):
    """Represents a delta between one revision and another.

    Instances convey enough information to allow a revision to be exchanged
    with another repository.

    Instances represent the fulltext revision data or a delta against
    another revision. Therefore the ``revision`` and ``delta`` attributes
    are mutually exclusive.

    Typically used for changegroup generation.
    """

    node = interfaceutil.Attribute(
        """20 byte node of this revision.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of this revision.""")

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of this revision.""")

    linknode = interfaceutil.Attribute(
        """20 byte node of the changelog revision this node is linked to.""")

    flags = interfaceutil.Attribute(
        """2 bytes of integer flags that apply to this revision.""")

    basenode = interfaceutil.Attribute(
        """20 byte node of the revision this data is a delta against.

        ``nullid`` indicates that the revision is a full revision and not
        a delta.
        """)

    baserevisionsize = interfaceutil.Attribute(
        """Size of base revision this delta is against.

        May be ``None`` if ``basenode`` is ``nullid``.
        """)

    revision = interfaceutil.Attribute(
        """Raw fulltext of revision data for this node.""")

    delta = interfaceutil.Attribute(
        """Delta between ``basenode`` and ``node``.

        Stored in the bdiff delta format.
        """)
384
384
class irevisiondeltarequest(interfaceutil.Interface):
    """Represents a request to generate an ``irevisiondelta``."""

    node = interfaceutil.Attribute(
        """20 byte node of revision being requested.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of revision.""")

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of revision.""")

    linknode = interfaceutil.Attribute(
        """20 byte node to store in ``linknode`` attribute.""")

    basenode = interfaceutil.Attribute(
        """Base revision that delta should be generated against.

        If ``nullid``, the derived ``irevisiondelta`` should have its
        ``revision`` field populated and no delta should be generated.

        If ``None``, the delta may be generated against any revision that
        is an ancestor of this revision. Or a full revision may be used.

        If any other value, the delta should be produced against that
        revision.
        """)

    ellipsis = interfaceutil.Attribute(
        """Boolean on whether the ellipsis flag should be set.""")
415
415
class ifilerevisionssequence(interfaceutil.Interface):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
           Contains the offset and flags for the revision. 64-bit unsigned
           integer where first 6 bytes are the offset and the next 2 bytes
           are flags. The offset can be 0 if it is not used by the store.
        compressed size
           Size of the revision data in the store. It can be 0 if it isn't
           needed by the store.
        uncompressed size
           Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
           Revision number of revision the delta for storage is encoded
           against. -1 indicates not encoded against a base revision.
        link revision
           Revision number of changelog revision this entry is related to.
        p1 revision
           Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
           Revision number of 2nd parent. -1 if no 2nd parent.
        node
           Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(rev):
        """Whether a revision number exists."""

    def insert(self, i, entry):
        """Add an item to the index at specific revision."""
466
466
class ifileindex(interfaceutil.Interface):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """
    index = interfaceutil.Attribute(
        """An ``ifilerevisionssequence`` instance.""")

    def __len__():
        """Obtain the number of revisions stored for this file."""

    def __iter__():
        """Iterate over revision numbers for this file."""

    def revs(start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def rev(node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    def lookup(node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def flags(rev):
        """Obtain flags used to affect storage of a revision."""

    def iscensored(rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def heads(start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """

    def deltaparent(rev):
        """Return the revision that is a suitable parent to delta against."""
560
560
class ifiledata(interfaceutil.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """
    def rawsize(rev):
        """The size of the fulltext data for a revision as stored."""

    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements. Use ``rawsize()`` if
        metadata size is important.
        """

    def checkhash(fulltext, node, p1=None, p2=None, rev=None):
        """Validate the stored hash of a given fulltext and node.

        Raises ``error.StorageError`` if hash validation fails.
        """

    def revision(node, raw=False):
        """Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        Operates on raw data in the store (``revision(node, raw=True)``).

        The returned data is the result of ``bdiff.bdiff`` on the raw
        revision data.
        """

    def emitrevisiondeltas(requests):
        """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.

        Given an iterable of objects conforming to the ``irevisiondeltarequest``
        interface, emits objects conforming to the ``irevisiondelta``
        interface.

        This method is a generator.

        ``irevisiondelta`` should be emitted in the same order of
        ``irevisiondeltarequest`` that was passed in.

        The emitted objects MUST conform by the results of
        ``irevisiondeltarequest``. Namely, they must respect any requests
        for building a delta from a specific ``basenode`` if defined.

        When sending deltas, implementations must take into account whether
        the client has the base delta before encoding a delta against that
        revision. A revision encountered previously in ``requests`` is
        always a suitable base revision. An example of a bad delta is a delta
        against a non-ancestor revision. Another example of a bad delta is a
        delta against a censored revision.
        """
650
650
class ifilemutation(interfaceutil.Interface):
    """Storage interface for mutation events of a tracked file."""

    def add(filedata, meta, transaction, linkrev, p1, p2):
        """Add a new revision to the store.

        Takes file data, dictionary of metadata, a transaction, linkrev,
        and parent nodes.

        Returns the node that was added.

        May no-op if a revision matching the supplied data is already stored.
        """

    def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
                    flags=0, cachedelta=None):
        """Add a new revision to the store.

        This is similar to ``add()`` except it operates at a lower level.

        The data passed in already contains a metadata header, if any.

        ``node`` and ``flags`` can be used to define the expected node and
        the flags to use with storage.

        ``add()`` is usually called when adding files from e.g. the working
        directory. ``addrevision()`` is often called by ``add()`` and for
        scenarios where revision data has already been computed, such as when
        applying raw data from a peer repo.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``addrevisioncb`` should be called for each node as it is committed.

        Returns a list of nodes that were processed. A node will be in the list
        even if it existed in the store previously.
        """

    def censorrevision(tr, node, tombstone=b''):
        """Remove the content of a single revision.

        The specified ``node`` will have its content purged from storage.
        Future attempts to access the revision data for this node will
        result in failure.

        A ``tombstone`` message can optionally be stored. This message may be
        displayed to users when they attempt to access the missing revision
        data.

        Storage backends may have stored deltas against the previous content
        in this revision. As part of censoring a revision, these storage
        backends are expected to rewrite any internally stored deltas such
        that they no longer reference the deleted content.
        """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revisions numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        This uses ``getstrippoint()`` to determine the first node to remove.
        Then it effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """
736
736
737 class ifilestorage(ifileindex, ifiledata, ifilemutation):
737 class ifilestorage(ifileindex, ifiledata, ifilemutation):
738 """Complete storage interface for a single tracked file."""
738 """Complete storage interface for a single tracked file."""
739
739
740 version = interfaceutil.Attribute(
741 """Version number of storage.
742
743 TODO this feels revlog centric and could likely be removed.
744 """)
745
746 _generaldelta = interfaceutil.Attribute(
740 _generaldelta = interfaceutil.Attribute(
747 """Whether deltas can be against any parent revision.
741 """Whether deltas can be against any parent revision.
748
742
749 TODO this is used by changegroup code and it could probably be
743 TODO this is used by changegroup code and it could probably be
750 folded into another API.
744 folded into another API.
751 """)
745 """)
752
746
753 def files():
747 def files():
754 """Obtain paths that are backing storage for this file.
748 """Obtain paths that are backing storage for this file.
755
749
756 TODO this is used heavily by verify code and there should probably
750 TODO this is used heavily by verify code and there should probably
757 be a better API for that.
751 be a better API for that.
758 """
752 """
759
753
760 def checksize():
754 def checksize():
761 """Obtain the expected sizes of backing files.
755 """Obtain the expected sizes of backing files.
762
756
763 TODO this is used by verify and it should not be part of the interface.
757 TODO this is used by verify and it should not be part of the interface.
764 """
758 """
765
759
766 def verifyintegrity(state):
760 def verifyintegrity(state):
767 """Verifies the integrity of file storage.
761 """Verifies the integrity of file storage.
768
762
769 ``state`` is a dict holding state of the verifier process. It can be
763 ``state`` is a dict holding state of the verifier process. It can be
770 used to communicate data between invocations of multiple storage
764 used to communicate data between invocations of multiple storage
771 primitives.
765 primitives.
772
766
773 The method yields objects conforming to the ``iverifyproblem``
767 The method yields objects conforming to the ``iverifyproblem``
774 interface.
768 interface.
775 """
769 """
776
770
777 class idirs(interfaceutil.Interface):
771 class idirs(interfaceutil.Interface):
778 """Interface representing a collection of directories from paths.
772 """Interface representing a collection of directories from paths.
779
773
780 This interface is essentially a derived data structure representing
774 This interface is essentially a derived data structure representing
781 directories from a collection of paths.
775 directories from a collection of paths.
782 """
776 """
783
777
784 def addpath(path):
778 def addpath(path):
785 """Add a path to the collection.
779 """Add a path to the collection.
786
780
787 All directories in the path will be added to the collection.
781 All directories in the path will be added to the collection.
788 """
782 """
789
783
790 def delpath(path):
784 def delpath(path):
791 """Remove a path from the collection.
785 """Remove a path from the collection.
792
786
793 If the removal was the last path in a particular directory, the
787 If the removal was the last path in a particular directory, the
794 directory is removed from the collection.
788 directory is removed from the collection.
795 """
789 """
796
790
797 def __iter__():
791 def __iter__():
798 """Iterate over the directories in this collection of paths."""
792 """Iterate over the directories in this collection of paths."""
799
793
800 def __contains__(path):
794 def __contains__(path):
801 """Whether a specific directory is in this collection."""
795 """Whether a specific directory is in this collection."""
802
796
803 class imanifestdict(interfaceutil.Interface):
797 class imanifestdict(interfaceutil.Interface):
804 """Interface representing a manifest data structure.
798 """Interface representing a manifest data structure.
805
799
806 A manifest is effectively a dict mapping paths to entries. Each entry
800 A manifest is effectively a dict mapping paths to entries. Each entry
807 consists of a binary node and extra flags affecting that entry.
801 consists of a binary node and extra flags affecting that entry.
808 """
802 """
809
803
810 def __getitem__(path):
804 def __getitem__(path):
811 """Returns the binary node value for a path in the manifest.
805 """Returns the binary node value for a path in the manifest.
812
806
813 Raises ``KeyError`` if the path does not exist in the manifest.
807 Raises ``KeyError`` if the path does not exist in the manifest.
814
808
815 Equivalent to ``self.find(path)[0]``.
809 Equivalent to ``self.find(path)[0]``.
816 """
810 """
817
811
818 def find(path):
812 def find(path):
819 """Returns the entry for a path in the manifest.
813 """Returns the entry for a path in the manifest.
820
814
821 Returns a 2-tuple of (node, flags).
815 Returns a 2-tuple of (node, flags).
822
816
823 Raises ``KeyError`` if the path does not exist in the manifest.
817 Raises ``KeyError`` if the path does not exist in the manifest.
824 """
818 """
825
819
826 def __len__():
820 def __len__():
827 """Return the number of entries in the manifest."""
821 """Return the number of entries in the manifest."""
828
822
829 def __nonzero__():
823 def __nonzero__():
830 """Returns True if the manifest has entries, False otherwise."""
824 """Returns True if the manifest has entries, False otherwise."""
831
825
832 __bool__ = __nonzero__
826 __bool__ = __nonzero__
833
827
834 def __setitem__(path, node):
828 def __setitem__(path, node):
835 """Define the node value for a path in the manifest.
829 """Define the node value for a path in the manifest.
836
830
837 If the path is already in the manifest, its flags will be copied to
831 If the path is already in the manifest, its flags will be copied to
838 the new entry.
832 the new entry.
839 """
833 """
840
834
841 def __contains__(path):
835 def __contains__(path):
842 """Whether a path exists in the manifest."""
836 """Whether a path exists in the manifest."""
843
837
844 def __delitem__(path):
838 def __delitem__(path):
845 """Remove a path from the manifest.
839 """Remove a path from the manifest.
846
840
847 Raises ``KeyError`` if the path is not in the manifest.
841 Raises ``KeyError`` if the path is not in the manifest.
848 """
842 """
849
843
850 def __iter__():
844 def __iter__():
851 """Iterate over paths in the manifest."""
845 """Iterate over paths in the manifest."""
852
846
853 def iterkeys():
847 def iterkeys():
854 """Iterate over paths in the manifest."""
848 """Iterate over paths in the manifest."""
855
849
856 def keys():
850 def keys():
857 """Obtain a list of paths in the manifest."""
851 """Obtain a list of paths in the manifest."""
858
852
859 def filesnotin(other, match=None):
853 def filesnotin(other, match=None):
860 """Obtain the set of paths in this manifest but not in another.
854 """Obtain the set of paths in this manifest but not in another.
861
855
862 ``match`` is an optional matcher function to be applied to both
856 ``match`` is an optional matcher function to be applied to both
863 manifests.
857 manifests.
864
858
865 Returns a set of paths.
859 Returns a set of paths.
866 """
860 """
867
861
868 def dirs():
862 def dirs():
869 """Returns an object implementing the ``idirs`` interface."""
863 """Returns an object implementing the ``idirs`` interface."""
870
864
871 def hasdir(dir):
865 def hasdir(dir):
872 """Returns a bool indicating if a directory is in this manifest."""
866 """Returns a bool indicating if a directory is in this manifest."""
873
867
874 def matches(match):
868 def matches(match):
875 """Generate a new manifest filtered through a matcher.
869 """Generate a new manifest filtered through a matcher.
876
870
877 Returns an object conforming to the ``imanifestdict`` interface.
871 Returns an object conforming to the ``imanifestdict`` interface.
878 """
872 """
879
873
880 def walk(match):
874 def walk(match):
881 """Generator of paths in manifest satisfying a matcher.
875 """Generator of paths in manifest satisfying a matcher.
882
876
883 This is equivalent to ``self.matches(match).iterkeys()`` except a new
877 This is equivalent to ``self.matches(match).iterkeys()`` except a new
884 manifest object is not created.
878 manifest object is not created.
885
879
886 If the matcher has explicit files listed and they don't exist in
880 If the matcher has explicit files listed and they don't exist in
887 the manifest, ``match.bad()`` is called for each missing file.
881 the manifest, ``match.bad()`` is called for each missing file.
888 """
882 """
889
883
890 def diff(other, match=None, clean=False):
884 def diff(other, match=None, clean=False):
891 """Find differences between this manifest and another.
885 """Find differences between this manifest and another.
892
886
893 This manifest is compared to ``other``.
887 This manifest is compared to ``other``.
894
888
895 If ``match`` is provided, the two manifests are filtered against this
889 If ``match`` is provided, the two manifests are filtered against this
896 matcher and only entries satisfying the matcher are compared.
890 matcher and only entries satisfying the matcher are compared.
897
891
898 If ``clean`` is True, unchanged files are included in the returned
892 If ``clean`` is True, unchanged files are included in the returned
899 object.
893 object.
900
894
901 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
895 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
902 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
896 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
903 represents the node and flags for this manifest and ``(node2, flag2)``
897 represents the node and flags for this manifest and ``(node2, flag2)``
904 are the same for the other manifest.
898 are the same for the other manifest.
905 """
899 """
906
900
907 def setflag(path, flag):
901 def setflag(path, flag):
908 """Set the flag value for a given path.
902 """Set the flag value for a given path.
909
903
910 Raises ``KeyError`` if the path is not already in the manifest.
904 Raises ``KeyError`` if the path is not already in the manifest.
911 """
905 """
912
906
913 def get(path, default=None):
907 def get(path, default=None):
914 """Obtain the node value for a path or a default value if missing."""
908 """Obtain the node value for a path or a default value if missing."""
915
909
916 def flags(path, default=''):
910 def flags(path, default=''):
917 """Return the flags value for a path or a default value if missing."""
911 """Return the flags value for a path or a default value if missing."""
918
912
919 def copy():
913 def copy():
920 """Return a copy of this manifest."""
914 """Return a copy of this manifest."""
921
915
922 def items():
916 def items():
923 """Returns an iterable of (path, node) for items in this manifest."""
917 """Returns an iterable of (path, node) for items in this manifest."""
924
918
925 def iteritems():
919 def iteritems():
926 """Identical to items()."""
920 """Identical to items()."""
927
921
928 def iterentries():
922 def iterentries():
929 """Returns an iterable of (path, node, flags) for this manifest.
923 """Returns an iterable of (path, node, flags) for this manifest.
930
924
931 Similar to ``iteritems()`` except items are a 3-tuple and include
925 Similar to ``iteritems()`` except items are a 3-tuple and include
932 flags.
926 flags.
933 """
927 """
934
928
935 def text():
929 def text():
936 """Obtain the raw data representation for this manifest.
930 """Obtain the raw data representation for this manifest.
937
931
938 Result is used to create a manifest revision.
932 Result is used to create a manifest revision.
939 """
933 """
940
934
941 def fastdelta(base, changes):
935 def fastdelta(base, changes):
942 """Obtain a delta between this manifest and another given changes.
936 """Obtain a delta between this manifest and another given changes.
943
937
944 ``base`` is the raw data representation for another manifest.
938 ``base`` is the raw data representation for another manifest.
945
939
946 ``changes`` is an iterable of ``(path, to_delete)``.
940 ``changes`` is an iterable of ``(path, to_delete)``.
947
941
948 Returns a 2-tuple containing ``bytearray(self.text())`` and the
942 Returns a 2-tuple containing ``bytearray(self.text())`` and the
949 delta between ``base`` and this manifest.
943 delta between ``base`` and this manifest.
950 """
944 """
951
945
952 class imanifestrevisionbase(interfaceutil.Interface):
946 class imanifestrevisionbase(interfaceutil.Interface):
953 """Base interface representing a single revision of a manifest.
947 """Base interface representing a single revision of a manifest.
954
948
955 Should not be used as a primary interface: should always be inherited
949 Should not be used as a primary interface: should always be inherited
956 as part of a larger interface.
950 as part of a larger interface.
957 """
951 """
958
952
959 def new():
953 def new():
960 """Obtain a new manifest instance.
954 """Obtain a new manifest instance.
961
955
962 Returns an object conforming to the ``imanifestrevisionwritable``
956 Returns an object conforming to the ``imanifestrevisionwritable``
963 interface. The instance will be associated with the same
957 interface. The instance will be associated with the same
964 ``imanifestlog`` collection as this instance.
958 ``imanifestlog`` collection as this instance.
965 """
959 """
966
960
967 def copy():
961 def copy():
968 """Obtain a copy of this manifest instance.
962 """Obtain a copy of this manifest instance.
969
963
970 Returns an object conforming to the ``imanifestrevisionwritable``
964 Returns an object conforming to the ``imanifestrevisionwritable``
971 interface. The instance will be associated with the same
965 interface. The instance will be associated with the same
972 ``imanifestlog`` collection as this instance.
966 ``imanifestlog`` collection as this instance.
973 """
967 """
974
968
975 def read():
969 def read():
976 """Obtain the parsed manifest data structure.
970 """Obtain the parsed manifest data structure.
977
971
978 The returned object conforms to the ``imanifestdict`` interface.
972 The returned object conforms to the ``imanifestdict`` interface.
979 """
973 """
980
974
981 class imanifestrevisionstored(imanifestrevisionbase):
975 class imanifestrevisionstored(imanifestrevisionbase):
982 """Interface representing a manifest revision committed to storage."""
976 """Interface representing a manifest revision committed to storage."""
983
977
984 def node():
978 def node():
985 """The binary node for this manifest."""
979 """The binary node for this manifest."""
986
980
987 parents = interfaceutil.Attribute(
981 parents = interfaceutil.Attribute(
988 """List of binary nodes that are parents for this manifest revision."""
982 """List of binary nodes that are parents for this manifest revision."""
989 )
983 )
990
984
991 def readdelta(shallow=False):
985 def readdelta(shallow=False):
992 """Obtain the manifest data structure representing changes from parent.
986 """Obtain the manifest data structure representing changes from parent.
993
987
994 This manifest is compared to its 1st parent. A new manifest representing
988 This manifest is compared to its 1st parent. A new manifest representing
995 those differences is constructed.
989 those differences is constructed.
996
990
997 The returned object conforms to the ``imanifestdict`` interface.
991 The returned object conforms to the ``imanifestdict`` interface.
998 """
992 """
999
993
1000 def readfast(shallow=False):
994 def readfast(shallow=False):
1001 """Calls either ``read()`` or ``readdelta()``.
995 """Calls either ``read()`` or ``readdelta()``.
1002
996
1003 The faster of the two options is called.
997 The faster of the two options is called.
1004 """
998 """
1005
999
1006 def find(key):
1000 def find(key):
1007 """Calls ``self.read().find(key)``.
1001 """Calls ``self.read().find(key)``.
1008
1002
1009 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1003 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1010 """
1004 """
1011
1005
1012 class imanifestrevisionwritable(imanifestrevisionbase):
1006 class imanifestrevisionwritable(imanifestrevisionbase):
1013 """Interface representing a manifest revision that can be committed."""
1007 """Interface representing a manifest revision that can be committed."""
1014
1008
1015 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1009 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1016 """Add this revision to storage.
1010 """Add this revision to storage.
1017
1011
1018 Takes a transaction object, the changeset revision number it will
1012 Takes a transaction object, the changeset revision number it will
1019 be associated with, its parent nodes, and lists of added and
1013 be associated with, its parent nodes, and lists of added and
1020 removed paths.
1014 removed paths.
1021
1015
1022 If match is provided, storage can choose not to inspect or write out
1016 If match is provided, storage can choose not to inspect or write out
1023 items that do not match. Storage is still required to be able to provide
1017 items that do not match. Storage is still required to be able to provide
1024 the full manifest in the future for any directories written (these
1018 the full manifest in the future for any directories written (these
1025 manifests should not be "narrowed on disk").
1019 manifests should not be "narrowed on disk").
1026
1020
1027 Returns the binary node of the created revision.
1021 Returns the binary node of the created revision.
1028 """
1022 """
1029
1023
1030 class imanifeststorage(interfaceutil.Interface):
1024 class imanifeststorage(interfaceutil.Interface):
1031 """Storage interface for manifest data."""
1025 """Storage interface for manifest data."""
1032
1026
1033 tree = interfaceutil.Attribute(
1027 tree = interfaceutil.Attribute(
1034 """The path to the directory this manifest tracks.
1028 """The path to the directory this manifest tracks.
1035
1029
1036 The empty bytestring represents the root manifest.
1030 The empty bytestring represents the root manifest.
1037 """)
1031 """)
1038
1032
1039 index = interfaceutil.Attribute(
1033 index = interfaceutil.Attribute(
1040 """An ``ifilerevisionssequence`` instance.""")
1034 """An ``ifilerevisionssequence`` instance.""")
1041
1035
1042 indexfile = interfaceutil.Attribute(
1036 indexfile = interfaceutil.Attribute(
1043 """Path of revlog index file.
1037 """Path of revlog index file.
1044
1038
1045 TODO this is revlog specific and should not be exposed.
1039 TODO this is revlog specific and should not be exposed.
1046 """)
1040 """)
1047
1041
1048 opener = interfaceutil.Attribute(
1042 opener = interfaceutil.Attribute(
1049 """VFS opener to use to access underlying files used for storage.
1043 """VFS opener to use to access underlying files used for storage.
1050
1044
1051 TODO this is revlog specific and should not be exposed.
1045 TODO this is revlog specific and should not be exposed.
1052 """)
1046 """)
1053
1047
1054 version = interfaceutil.Attribute(
1048 version = interfaceutil.Attribute(
1055 """Revlog version number.
1049 """Revlog version number.
1056
1050
1057 TODO this is revlog specific and should not be exposed.
1051 TODO this is revlog specific and should not be exposed.
1058 """)
1052 """)
1059
1053
1060 _generaldelta = interfaceutil.Attribute(
1054 _generaldelta = interfaceutil.Attribute(
1061 """Whether generaldelta storage is being used.
1055 """Whether generaldelta storage is being used.
1062
1056
1063 TODO this is revlog specific and should not be exposed.
1057 TODO this is revlog specific and should not be exposed.
1064 """)
1058 """)
1065
1059
1066 fulltextcache = interfaceutil.Attribute(
1060 fulltextcache = interfaceutil.Attribute(
1067 """Dict with cache of fulltexts.
1061 """Dict with cache of fulltexts.
1068
1062
1069 TODO this doesn't feel appropriate for the storage interface.
1063 TODO this doesn't feel appropriate for the storage interface.
1070 """)
1064 """)
1071
1065
1072 def __len__():
1066 def __len__():
1073 """Obtain the number of revisions stored for this manifest."""
1067 """Obtain the number of revisions stored for this manifest."""
1074
1068
1075 def __iter__():
1069 def __iter__():
1076 """Iterate over revision numbers for this manifest."""
1070 """Iterate over revision numbers for this manifest."""
1077
1071
1078 def rev(node):
1072 def rev(node):
1079 """Obtain the revision number given a binary node.
1073 """Obtain the revision number given a binary node.
1080
1074
1081 Raises ``error.LookupError`` if the node is not known.
1075 Raises ``error.LookupError`` if the node is not known.
1082 """
1076 """
1083
1077
1084 def node(rev):
1078 def node(rev):
1085 """Obtain the node value given a revision number.
1079 """Obtain the node value given a revision number.
1086
1080
1087 Raises ``error.LookupError`` if the revision is not known.
1081 Raises ``error.LookupError`` if the revision is not known.
1088 """
1082 """
1089
1083
1090 def lookup(value):
1084 def lookup(value):
1091 """Attempt to resolve a value to a node.
1085 """Attempt to resolve a value to a node.
1092
1086
1093 Value can be a binary node, hex node, revision number, or a bytes
1087 Value can be a binary node, hex node, revision number, or a bytes
1094 that can be converted to an integer.
1088 that can be converted to an integer.
1095
1089
1096 Raises ``error.LookupError`` if a node could not be resolved.
1090 Raises ``error.LookupError`` if a node could not be resolved.
1097
1091
1098 TODO this is only used by debug* commands and can probably be deleted
1092 TODO this is only used by debug* commands and can probably be deleted
1099 easily.
1093 easily.
1100 """
1094 """
1101
1095
1102 def parents(node):
1096 def parents(node):
1103 """Returns a 2-tuple of parent nodes for a node.
1097 """Returns a 2-tuple of parent nodes for a node.
1104
1098
1105 Values will be ``nullid`` if the parent is empty.
1099 Values will be ``nullid`` if the parent is empty.
1106 """
1100 """
1107
1101
1108 def parentrevs(rev):
1102 def parentrevs(rev):
1109 """Like parents() but operates on revision numbers."""
1103 """Like parents() but operates on revision numbers."""
1110
1104
1111 def linkrev(rev):
1105 def linkrev(rev):
1112 """Obtain the changeset revision number a revision is linked to."""
1106 """Obtain the changeset revision number a revision is linked to."""
1113
1107
1114 def revision(node, _df=None, raw=False):
1108 def revision(node, _df=None, raw=False):
1115 """Obtain fulltext data for a node."""
1109 """Obtain fulltext data for a node."""
1116
1110
1117 def revdiff(rev1, rev2):
1111 def revdiff(rev1, rev2):
1118 """Obtain a delta between two revision numbers.
1112 """Obtain a delta between two revision numbers.
1119
1113
1120 The returned data is the result of ``bdiff.bdiff()`` on the raw
1114 The returned data is the result of ``bdiff.bdiff()`` on the raw
1121 revision data.
1115 revision data.
1122 """
1116 """
1123
1117
1124 def cmp(node, fulltext):
1118 def cmp(node, fulltext):
1125 """Compare fulltext to another revision.
1119 """Compare fulltext to another revision.
1126
1120
1127 Returns True if the fulltext is different from what is stored.
1121 Returns True if the fulltext is different from what is stored.
1128 """
1122 """
1129
1123
1130 def emitrevisiondeltas(requests):
1124 def emitrevisiondeltas(requests):
1131 """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
1125 """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
1132
1126
1133 See the documentation for ``ifiledata`` for more.
1127 See the documentation for ``ifiledata`` for more.
1134 """
1128 """
1135
1129
1136 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1130 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1137 """Process a series of deltas for storage.
1131 """Process a series of deltas for storage.
1138
1132
1139 See the documentation in ``ifilemutation`` for more.
1133 See the documentation in ``ifilemutation`` for more.
1140 """
1134 """
1141
1135
1142 def getstrippoint(minlink):
1136 def getstrippoint(minlink):
1143 """Find minimum revision that must be stripped to strip a linkrev.
1137 """Find minimum revision that must be stripped to strip a linkrev.
1144
1138
1145 See the documentation in ``ifilemutation`` for more.
1139 See the documentation in ``ifilemutation`` for more.
1146 """
1140 """
1147
1141
1148 def strip(minlink, transaction):
1142 def strip(minlink, transaction):
1149 """Remove storage of items starting at a linkrev.
1143 """Remove storage of items starting at a linkrev.
1150
1144
1151 See the documentation in ``ifilemutation`` for more.
1145 See the documentation in ``ifilemutation`` for more.
1152 """
1146 """
1153
1147
1154 def checksize():
1148 def checksize():
1155 """Obtain the expected sizes of backing files.
1149 """Obtain the expected sizes of backing files.
1156
1150
1157 TODO this is used by verify and it should not be part of the interface.
1151 TODO this is used by verify and it should not be part of the interface.
1158 """
1152 """
1159
1153
1160 def files():
1154 def files():
1161 """Obtain paths that are backing storage for this manifest.
1155 """Obtain paths that are backing storage for this manifest.
1162
1156
1163 TODO this is used by verify and there should probably be a better API
1157 TODO this is used by verify and there should probably be a better API
1164 for this functionality.
1158 for this functionality.
1165 """
1159 """
1166
1160
1167 def deltaparent(rev):
1161 def deltaparent(rev):
1168 """Obtain the revision that a revision is delta'd against.
1162 """Obtain the revision that a revision is delta'd against.
1169
1163
1170 TODO delta encoding is an implementation detail of storage and should
1164 TODO delta encoding is an implementation detail of storage and should
1171 not be exposed to the storage interface.
1165 not be exposed to the storage interface.
1172 """
1166 """
1173
1167
1174 def clone(tr, dest, **kwargs):
1168 def clone(tr, dest, **kwargs):
1175 """Clone this instance to another."""
1169 """Clone this instance to another."""
1176
1170
1177 def clearcaches(clear_persisted_data=False):
1171 def clearcaches(clear_persisted_data=False):
1178 """Clear any caches associated with this instance."""
1172 """Clear any caches associated with this instance."""
1179
1173
1180 def dirlog(d):
1174 def dirlog(d):
1181 """Obtain a manifest storage instance for a tree."""
1175 """Obtain a manifest storage instance for a tree."""
1182
1176
1183 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1177 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1184 match=None):
1178 match=None):
1185 """Add a revision to storage.
1179 """Add a revision to storage.
1186
1180
1187 ``m`` is an object conforming to ``imanifestdict``.
1181 ``m`` is an object conforming to ``imanifestdict``.
1188
1182
1189 ``link`` is the linkrev revision number.
1183 ``link`` is the linkrev revision number.
1190
1184
1191 ``p1`` and ``p2`` are the parent revision numbers.
1185 ``p1`` and ``p2`` are the parent revision numbers.
1192
1186
1193 ``added`` and ``removed`` are iterables of added and removed paths,
1187 ``added`` and ``removed`` are iterables of added and removed paths,
1194 respectively.
1188 respectively.
1195
1189
1196 ``readtree`` is a function that can be used to read the child tree(s)
1190 ``readtree`` is a function that can be used to read the child tree(s)
1197 when recursively writing the full tree structure when using
1191 when recursively writing the full tree structure when using
1198 treemanifests.
1192 treemanifests.
1199
1193
1200 ``match`` is a matcher that can be used to hint to storage that not all
1194 ``match`` is a matcher that can be used to hint to storage that not all
1201 paths must be inspected; this is an optimization and can be safely
1195 paths must be inspected; this is an optimization and can be safely
1202 ignored. Note that the storage must still be able to reproduce a full
1196 ignored. Note that the storage must still be able to reproduce a full
1203 manifest including files that did not match.
1197 manifest including files that did not match.
1204 """
1198 """
1205
1199
1206 class imanifestlog(interfaceutil.Interface):
1200 class imanifestlog(interfaceutil.Interface):
1207 """Interface representing a collection of manifest snapshots.
1201 """Interface representing a collection of manifest snapshots.
1208
1202
1209 Represents the root manifest in a repository.
1203 Represents the root manifest in a repository.
1210
1204
1211 Also serves as a means to access nested tree manifests and to cache
1205 Also serves as a means to access nested tree manifests and to cache
1212 tree manifests.
1206 tree manifests.
1213 """
1207 """
1214
1208
1215 def __getitem__(node):
1209 def __getitem__(node):
1216 """Obtain a manifest instance for a given binary node.
1210 """Obtain a manifest instance for a given binary node.
1217
1211
1218 Equivalent to calling ``self.get('', node)``.
1212 Equivalent to calling ``self.get('', node)``.
1219
1213
1220 The returned object conforms to the ``imanifestrevisionstored``
1214 The returned object conforms to the ``imanifestrevisionstored``
1221 interface.
1215 interface.
1222 """
1216 """
1223
1217
1224 def get(tree, node, verify=True):
1218 def get(tree, node, verify=True):
1225 """Retrieve the manifest instance for a given directory and binary node.
1219 """Retrieve the manifest instance for a given directory and binary node.
1226
1220
1227 ``node`` always refers to the node of the root manifest (which will be
1221 ``node`` always refers to the node of the root manifest (which will be
1228 the only manifest if flat manifests are being used).
1222 the only manifest if flat manifests are being used).
1229
1223
1230 If ``tree`` is the empty string, the root manifest is returned.
1224 If ``tree`` is the empty string, the root manifest is returned.
1231 Otherwise the manifest for the specified directory will be returned
1225 Otherwise the manifest for the specified directory will be returned
1232 (requires tree manifests).
1226 (requires tree manifests).
1233
1227
1234 If ``verify`` is True, ``LookupError`` is raised if the node is not
1228 If ``verify`` is True, ``LookupError`` is raised if the node is not
1235 known.
1229 known.
1236
1230
1237 The returned object conforms to the ``imanifestrevisionstored``
1231 The returned object conforms to the ``imanifestrevisionstored``
1238 interface.
1232 interface.
1239 """
1233 """
1240
1234
1241 def getstorage(tree):
1235 def getstorage(tree):
1242 """Retrieve an interface to storage for a particular tree.
1236 """Retrieve an interface to storage for a particular tree.
1243
1237
1244 If ``tree`` is the empty bytestring, storage for the root manifest will
1238 If ``tree`` is the empty bytestring, storage for the root manifest will
1245 be returned. Otherwise storage for a tree manifest is returned.
1239 be returned. Otherwise storage for a tree manifest is returned.
1246
1240
1247 TODO formalize interface for returned object.
1241 TODO formalize interface for returned object.
1248 """
1242 """
1249
1243
1250 def clearcaches():
1244 def clearcaches():
1251 """Clear caches associated with this collection."""
1245 """Clear caches associated with this collection."""
1252
1246
1253 def rev(node):
1247 def rev(node):
1254 """Obtain the revision number for a binary node.
1248 """Obtain the revision number for a binary node.
1255
1249
1256 Raises ``error.LookupError`` if the node is not known.
1250 Raises ``error.LookupError`` if the node is not known.
1257 """
1251 """
1258
1252
1259 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1253 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1260 """Local repository sub-interface providing access to tracked file storage.
1254 """Local repository sub-interface providing access to tracked file storage.
1261
1255
1262 This interface defines how a repository accesses storage for a single
1256 This interface defines how a repository accesses storage for a single
1263 tracked file path.
1257 tracked file path.
1264 """
1258 """
1265
1259
1266 def file(f):
1260 def file(f):
1267 """Obtain a filelog for a tracked path.
1261 """Obtain a filelog for a tracked path.
1268
1262
1269 The returned type conforms to the ``ifilestorage`` interface.
1263 The returned type conforms to the ``ifilestorage`` interface.
1270 """
1264 """
1271
1265
1272 class ilocalrepositorymain(interfaceutil.Interface):
1266 class ilocalrepositorymain(interfaceutil.Interface):
1273 """Main interface for local repositories.
1267 """Main interface for local repositories.
1274
1268
1275 This currently captures the reality of things - not how things should be.
1269 This currently captures the reality of things - not how things should be.
1276 """
1270 """
1277
1271
1278 supportedformats = interfaceutil.Attribute(
1272 supportedformats = interfaceutil.Attribute(
1279 """Set of requirements that apply to stream clone.
1273 """Set of requirements that apply to stream clone.
1280
1274
1281 This is actually a class attribute and is shared among all instances.
1275 This is actually a class attribute and is shared among all instances.
1282 """)
1276 """)
1283
1277
1284 supported = interfaceutil.Attribute(
1278 supported = interfaceutil.Attribute(
1285 """Set of requirements that this repo is capable of opening.""")
1279 """Set of requirements that this repo is capable of opening.""")
1286
1280
1287 requirements = interfaceutil.Attribute(
1281 requirements = interfaceutil.Attribute(
1288 """Set of requirements this repo uses.""")
1282 """Set of requirements this repo uses.""")
1289
1283
1290 filtername = interfaceutil.Attribute(
1284 filtername = interfaceutil.Attribute(
1291 """Name of the repoview that is active on this repo.""")
1285 """Name of the repoview that is active on this repo.""")
1292
1286
1293 wvfs = interfaceutil.Attribute(
1287 wvfs = interfaceutil.Attribute(
1294 """VFS used to access the working directory.""")
1288 """VFS used to access the working directory.""")
1295
1289
1296 vfs = interfaceutil.Attribute(
1290 vfs = interfaceutil.Attribute(
1297 """VFS rooted at the .hg directory.
1291 """VFS rooted at the .hg directory.
1298
1292
1299 Used to access repository data not in the store.
1293 Used to access repository data not in the store.
1300 """)
1294 """)
1301
1295
1302 svfs = interfaceutil.Attribute(
1296 svfs = interfaceutil.Attribute(
1303 """VFS rooted at the store.
1297 """VFS rooted at the store.
1304
1298
1305 Used to access repository data in the store. Typically .hg/store.
1299 Used to access repository data in the store. Typically .hg/store.
1306 But can point elsewhere if the store is shared.
1300 But can point elsewhere if the store is shared.
1307 """)
1301 """)
1308
1302
1309 root = interfaceutil.Attribute(
1303 root = interfaceutil.Attribute(
1310 """Path to the root of the working directory.""")
1304 """Path to the root of the working directory.""")
1311
1305
1312 path = interfaceutil.Attribute(
1306 path = interfaceutil.Attribute(
1313 """Path to the .hg directory.""")
1307 """Path to the .hg directory.""")
1314
1308
1315 origroot = interfaceutil.Attribute(
1309 origroot = interfaceutil.Attribute(
1316 """The filesystem path that was used to construct the repo.""")
1310 """The filesystem path that was used to construct the repo.""")
1317
1311
1318 auditor = interfaceutil.Attribute(
1312 auditor = interfaceutil.Attribute(
1319 """A pathauditor for the working directory.
1313 """A pathauditor for the working directory.
1320
1314
1321 This checks if a path refers to a nested repository.
1315 This checks if a path refers to a nested repository.
1322
1316
1323 Operates on the filesystem.
1317 Operates on the filesystem.
1324 """)
1318 """)
1325
1319
1326 nofsauditor = interfaceutil.Attribute(
1320 nofsauditor = interfaceutil.Attribute(
1327 """A pathauditor for the working directory.
1321 """A pathauditor for the working directory.
1328
1322
1329 This is like ``auditor`` except it doesn't do filesystem checks.
1323 This is like ``auditor`` except it doesn't do filesystem checks.
1330 """)
1324 """)
1331
1325
1332 baseui = interfaceutil.Attribute(
1326 baseui = interfaceutil.Attribute(
1333 """Original ui instance passed into constructor.""")
1327 """Original ui instance passed into constructor.""")
1334
1328
1335 ui = interfaceutil.Attribute(
1329 ui = interfaceutil.Attribute(
1336 """Main ui instance for this instance.""")
1330 """Main ui instance for this instance.""")
1337
1331
1338 sharedpath = interfaceutil.Attribute(
1332 sharedpath = interfaceutil.Attribute(
1339 """Path to the .hg directory of the repo this repo was shared from.""")
1333 """Path to the .hg directory of the repo this repo was shared from.""")
1340
1334
1341 store = interfaceutil.Attribute(
1335 store = interfaceutil.Attribute(
1342 """A store instance.""")
1336 """A store instance.""")
1343
1337
1344 spath = interfaceutil.Attribute(
1338 spath = interfaceutil.Attribute(
1345 """Path to the store.""")
1339 """Path to the store.""")
1346
1340
1347 sjoin = interfaceutil.Attribute(
1341 sjoin = interfaceutil.Attribute(
1348 """Alias to self.store.join.""")
1342 """Alias to self.store.join.""")
1349
1343
1350 cachevfs = interfaceutil.Attribute(
1344 cachevfs = interfaceutil.Attribute(
1351 """A VFS used to access the cache directory.
1345 """A VFS used to access the cache directory.
1352
1346
1353 Typically .hg/cache.
1347 Typically .hg/cache.
1354 """)
1348 """)
1355
1349
1356 filteredrevcache = interfaceutil.Attribute(
1350 filteredrevcache = interfaceutil.Attribute(
1357 """Holds sets of revisions to be filtered.""")
1351 """Holds sets of revisions to be filtered.""")
1358
1352
1359 names = interfaceutil.Attribute(
1353 names = interfaceutil.Attribute(
1360 """A ``namespaces`` instance.""")
1354 """A ``namespaces`` instance.""")
1361
1355
1362 def close():
1356 def close():
1363 """Close the handle on this repository."""
1357 """Close the handle on this repository."""
1364
1358
1365 def peer():
1359 def peer():
1366 """Obtain an object conforming to the ``peer`` interface."""
1360 """Obtain an object conforming to the ``peer`` interface."""
1367
1361
1368 def unfiltered():
1362 def unfiltered():
1369 """Obtain an unfiltered/raw view of this repo."""
1363 """Obtain an unfiltered/raw view of this repo."""
1370
1364
1371 def filtered(name, visibilityexceptions=None):
1365 def filtered(name, visibilityexceptions=None):
1372 """Obtain a named view of this repository."""
1366 """Obtain a named view of this repository."""
1373
1367
1374 obsstore = interfaceutil.Attribute(
1368 obsstore = interfaceutil.Attribute(
1375 """A store of obsolescence data.""")
1369 """A store of obsolescence data.""")
1376
1370
1377 changelog = interfaceutil.Attribute(
1371 changelog = interfaceutil.Attribute(
1378 """A handle on the changelog revlog.""")
1372 """A handle on the changelog revlog.""")
1379
1373
1380 manifestlog = interfaceutil.Attribute(
1374 manifestlog = interfaceutil.Attribute(
1381 """An instance conforming to the ``imanifestlog`` interface.
1375 """An instance conforming to the ``imanifestlog`` interface.
1382
1376
1383 Provides access to manifests for the repository.
1377 Provides access to manifests for the repository.
1384 """)
1378 """)
1385
1379
1386 dirstate = interfaceutil.Attribute(
1380 dirstate = interfaceutil.Attribute(
1387 """Working directory state.""")
1381 """Working directory state.""")
1388
1382
1389 narrowpats = interfaceutil.Attribute(
1383 narrowpats = interfaceutil.Attribute(
1390 """Matcher patterns for this repository's narrowspec.""")
1384 """Matcher patterns for this repository's narrowspec.""")
1391
1385
1392 def narrowmatch():
1386 def narrowmatch():
1393 """Obtain a matcher for the narrowspec."""
1387 """Obtain a matcher for the narrowspec."""
1394
1388
1395 def setnarrowpats(newincludes, newexcludes):
1389 def setnarrowpats(newincludes, newexcludes):
1396 """Define the narrowspec for this repository."""
1390 """Define the narrowspec for this repository."""
1397
1391
1398 def __getitem__(changeid):
1392 def __getitem__(changeid):
1399 """Try to resolve a changectx."""
1393 """Try to resolve a changectx."""
1400
1394
1401 def __contains__(changeid):
1395 def __contains__(changeid):
1402 """Whether a changeset exists."""
1396 """Whether a changeset exists."""
1403
1397
1404 def __nonzero__():
1398 def __nonzero__():
1405 """Always returns True."""
1399 """Always returns True."""
1406 return True
1400 return True
1407
1401
1408 __bool__ = __nonzero__
1402 __bool__ = __nonzero__
1409
1403
1410 def __len__():
1404 def __len__():
1411 """Returns the number of changesets in the repo."""
1405 """Returns the number of changesets in the repo."""
1412
1406
1413 def __iter__():
1407 def __iter__():
1414 """Iterate over revisions in the changelog."""
1408 """Iterate over revisions in the changelog."""
1415
1409
1416 def revs(expr, *args):
1410 def revs(expr, *args):
1417 """Evaluate a revset.
1411 """Evaluate a revset.
1418
1412
1419 Emits revisions.
1413 Emits revisions.
1420 """
1414 """
1421
1415
1422 def set(expr, *args):
1416 def set(expr, *args):
1423 """Evaluate a revset.
1417 """Evaluate a revset.
1424
1418
1425 Emits changectx instances.
1419 Emits changectx instances.
1426 """
1420 """
1427
1421
1428 def anyrevs(specs, user=False, localalias=None):
1422 def anyrevs(specs, user=False, localalias=None):
1429 """Find revisions matching one of the given revsets."""
1423 """Find revisions matching one of the given revsets."""
1430
1424
1431 def url():
1425 def url():
1432 """Returns a string representing the location of this repo."""
1426 """Returns a string representing the location of this repo."""
1433
1427
1434 def hook(name, throw=False, **args):
1428 def hook(name, throw=False, **args):
1435 """Call a hook."""
1429 """Call a hook."""
1436
1430
1437 def tags():
1431 def tags():
1438 """Return a mapping of tag to node."""
1432 """Return a mapping of tag to node."""
1439
1433
1440 def tagtype(tagname):
1434 def tagtype(tagname):
1441 """Return the type of a given tag."""
1435 """Return the type of a given tag."""
1442
1436
1443 def tagslist():
1437 def tagslist():
1444 """Return a list of tags ordered by revision."""
1438 """Return a list of tags ordered by revision."""
1445
1439
1446 def nodetags(node):
1440 def nodetags(node):
1447 """Return the tags associated with a node."""
1441 """Return the tags associated with a node."""
1448
1442
1449 def nodebookmarks(node):
1443 def nodebookmarks(node):
1450 """Return the list of bookmarks pointing to the specified node."""
1444 """Return the list of bookmarks pointing to the specified node."""
1451
1445
1452 def branchmap():
1446 def branchmap():
1453 """Return a mapping of branch to heads in that branch."""
1447 """Return a mapping of branch to heads in that branch."""
1454
1448
1455 def revbranchcache():
1449 def revbranchcache():
1456 pass
1450 pass
1457
1451
1458 def branchtip(branchtip, ignoremissing=False):
1452 def branchtip(branchtip, ignoremissing=False):
1459 """Return the tip node for a given branch."""
1453 """Return the tip node for a given branch."""
1460
1454
1461 def lookup(key):
1455 def lookup(key):
1462 """Resolve the node for a revision."""
1456 """Resolve the node for a revision."""
1463
1457
1464 def lookupbranch(key):
1458 def lookupbranch(key):
1465 """Look up the branch name of the given revision or branch name."""
1459 """Look up the branch name of the given revision or branch name."""
1466
1460
1467 def known(nodes):
1461 def known(nodes):
1468 """Determine whether a series of nodes is known.
1462 """Determine whether a series of nodes is known.
1469
1463
1470 Returns a list of bools.
1464 Returns a list of bools.
1471 """
1465 """
1472
1466
1473 def local():
1467 def local():
1474 """Whether the repository is local."""
1468 """Whether the repository is local."""
1475 return True
1469 return True
1476
1470
1477 def publishing():
1471 def publishing():
1478 """Whether the repository is a publishing repository."""
1472 """Whether the repository is a publishing repository."""
1479
1473
1480 def cancopy():
1474 def cancopy():
1481 pass
1475 pass
1482
1476
1483 def shared():
1477 def shared():
1484 """The type of shared repository or None."""
1478 """The type of shared repository or None."""
1485
1479
1486 def wjoin(f, *insidef):
1480 def wjoin(f, *insidef):
1487 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1481 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1488
1482
1489 def setparents(p1, p2):
1483 def setparents(p1, p2):
1490 """Set the parent nodes of the working directory."""
1484 """Set the parent nodes of the working directory."""
1491
1485
1492 def filectx(path, changeid=None, fileid=None):
1486 def filectx(path, changeid=None, fileid=None):
1493 """Obtain a filectx for the given file revision."""
1487 """Obtain a filectx for the given file revision."""
1494
1488
1495 def getcwd():
1489 def getcwd():
1496 """Obtain the current working directory from the dirstate."""
1490 """Obtain the current working directory from the dirstate."""
1497
1491
1498 def pathto(f, cwd=None):
1492 def pathto(f, cwd=None):
1499 """Obtain the relative path to a file."""
1493 """Obtain the relative path to a file."""
1500
1494
1501 def adddatafilter(name, fltr):
1495 def adddatafilter(name, fltr):
1502 pass
1496 pass
1503
1497
1504 def wread(filename):
1498 def wread(filename):
1505 """Read a file from wvfs, using data filters."""
1499 """Read a file from wvfs, using data filters."""
1506
1500
1507 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1501 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1508 """Write data to a file in the wvfs, using data filters."""
1502 """Write data to a file in the wvfs, using data filters."""
1509
1503
1510 def wwritedata(filename, data):
1504 def wwritedata(filename, data):
1511 """Resolve data for writing to the wvfs, using data filters."""
1505 """Resolve data for writing to the wvfs, using data filters."""
1512
1506
1513 def currenttransaction():
1507 def currenttransaction():
1514 """Obtain the current transaction instance or None."""
1508 """Obtain the current transaction instance or None."""
1515
1509
1516 def transaction(desc, report=None):
1510 def transaction(desc, report=None):
1517 """Open a new transaction to write to the repository."""
1511 """Open a new transaction to write to the repository."""
1518
1512
1519 def undofiles():
1513 def undofiles():
1520 """Returns a list of (vfs, path) for files to undo transactions."""
1514 """Returns a list of (vfs, path) for files to undo transactions."""
1521
1515
1522 def recover():
1516 def recover():
1523 """Roll back an interrupted transaction."""
1517 """Roll back an interrupted transaction."""
1524
1518
1525 def rollback(dryrun=False, force=False):
1519 def rollback(dryrun=False, force=False):
1526 """Undo the last transaction.
1520 """Undo the last transaction.
1527
1521
1528 DANGEROUS.
1522 DANGEROUS.
1529 """
1523 """
1530
1524
1531 def updatecaches(tr=None, full=False):
1525 def updatecaches(tr=None, full=False):
1532 """Warm repo caches."""
1526 """Warm repo caches."""
1533
1527
1534 def invalidatecaches():
1528 def invalidatecaches():
1535 """Invalidate cached data due to the repository mutating."""
1529 """Invalidate cached data due to the repository mutating."""
1536
1530
1537 def invalidatevolatilesets():
1531 def invalidatevolatilesets():
1538 pass
1532 pass
1539
1533
1540 def invalidatedirstate():
1534 def invalidatedirstate():
1541 """Invalidate the dirstate."""
1535 """Invalidate the dirstate."""
1542
1536
1543 def invalidate(clearfilecache=False):
1537 def invalidate(clearfilecache=False):
1544 pass
1538 pass
1545
1539
1546 def invalidateall():
1540 def invalidateall():
1547 pass
1541 pass
1548
1542
1549 def lock(wait=True):
1543 def lock(wait=True):
1550 """Lock the repository store and return a lock instance."""
1544 """Lock the repository store and return a lock instance."""
1551
1545
1552 def wlock(wait=True):
1546 def wlock(wait=True):
1553 """Lock the non-store parts of the repository."""
1547 """Lock the non-store parts of the repository."""
1554
1548
1555 def currentwlock():
1549 def currentwlock():
1556 """Return the wlock if it's held or None."""
1550 """Return the wlock if it's held or None."""
1557
1551
1558 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1552 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1559 pass
1553 pass
1560
1554
1561 def commit(text='', user=None, date=None, match=None, force=False,
1555 def commit(text='', user=None, date=None, match=None, force=False,
1562 editor=False, extra=None):
1556 editor=False, extra=None):
1563 """Add a new revision to the repository."""
1557 """Add a new revision to the repository."""
1564
1558
1565 def commitctx(ctx, error=False):
1559 def commitctx(ctx, error=False):
1566 """Commit a commitctx instance to the repository."""
1560 """Commit a commitctx instance to the repository."""
1567
1561
1568 def destroying():
1562 def destroying():
1569 """Inform the repository that nodes are about to be destroyed."""
1563 """Inform the repository that nodes are about to be destroyed."""
1570
1564
1571 def destroyed():
1565 def destroyed():
1572 """Inform the repository that nodes have been destroyed."""
1566 """Inform the repository that nodes have been destroyed."""
1573
1567
1574 def status(node1='.', node2=None, match=None, ignored=False,
1568 def status(node1='.', node2=None, match=None, ignored=False,
1575 clean=False, unknown=False, listsubrepos=False):
1569 clean=False, unknown=False, listsubrepos=False):
1576 """Convenience method to call repo[x].status()."""
1570 """Convenience method to call repo[x].status()."""
1577
1571
1578 def addpostdsstatus(ps):
1572 def addpostdsstatus(ps):
1579 pass
1573 pass
1580
1574
1581 def postdsstatus():
1575 def postdsstatus():
1582 pass
1576 pass
1583
1577
1584 def clearpostdsstatus():
1578 def clearpostdsstatus():
1585 pass
1579 pass
1586
1580
1587 def heads(start=None):
1581 def heads(start=None):
1588 """Obtain list of nodes that are DAG heads."""
1582 """Obtain list of nodes that are DAG heads."""
1589
1583
1590 def branchheads(branch=None, start=None, closed=False):
1584 def branchheads(branch=None, start=None, closed=False):
1591 pass
1585 pass
1592
1586
1593 def branches(nodes):
1587 def branches(nodes):
1594 pass
1588 pass
1595
1589
1596 def between(pairs):
1590 def between(pairs):
1597 pass
1591 pass
1598
1592
1599 def checkpush(pushop):
1593 def checkpush(pushop):
1600 pass
1594 pass
1601
1595
1602 prepushoutgoinghooks = interfaceutil.Attribute(
1596 prepushoutgoinghooks = interfaceutil.Attribute(
1603 """util.hooks instance.""")
1597 """util.hooks instance.""")
1604
1598
1605 def pushkey(namespace, key, old, new):
1599 def pushkey(namespace, key, old, new):
1606 pass
1600 pass
1607
1601
1608 def listkeys(namespace):
1602 def listkeys(namespace):
1609 pass
1603 pass
1610
1604
1611 def debugwireargs(one, two, three=None, four=None, five=None):
1605 def debugwireargs(one, two, three=None, four=None, five=None):
1612 pass
1606 pass
1613
1607
1614 def savecommitmessage(text):
1608 def savecommitmessage(text):
1615 pass
1609 pass
1616
1610
1617 class completelocalrepository(ilocalrepositorymain,
1611 class completelocalrepository(ilocalrepositorymain,
1618 ilocalrepositoryfilestorage):
1612 ilocalrepositoryfilestorage):
1619 """Complete interface for a local repository."""
1613 """Complete interface for a local repository."""
@@ -1,741 +1,739
1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 # To use this with the test suite:
8 # To use this with the test suite:
9 #
9 #
10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
12
12
13 from __future__ import absolute_import
13 from __future__ import absolute_import
14
14
15 import stat
15 import stat
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18 from mercurial.node import (
18 from mercurial.node import (
19 bin,
19 bin,
20 hex,
20 hex,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 )
23 )
24 from mercurial.thirdparty import (
24 from mercurial.thirdparty import (
25 attr,
25 attr,
26 cbor,
26 cbor,
27 )
27 )
28 from mercurial import (
28 from mercurial import (
29 ancestor,
29 ancestor,
30 bundlerepo,
30 bundlerepo,
31 error,
31 error,
32 extensions,
32 extensions,
33 localrepo,
33 localrepo,
34 mdiff,
34 mdiff,
35 pycompat,
35 pycompat,
36 repository,
36 repository,
37 revlog,
37 revlog,
38 store,
38 store,
39 verify,
39 verify,
40 )
40 )
41 from mercurial.utils import (
41 from mercurial.utils import (
42 interfaceutil,
42 interfaceutil,
43 )
43 )
44
44
45 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
45 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
46 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
46 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
47 # be specifying the version(s) of Mercurial they are tested with, or
47 # be specifying the version(s) of Mercurial they are tested with, or
48 # leave the attribute unspecified.
48 # leave the attribute unspecified.
49 testedwith = 'ships-with-hg-core'
49 testedwith = 'ships-with-hg-core'
50
50
51 REQUIREMENT = 'testonly-simplestore'
51 REQUIREMENT = 'testonly-simplestore'
52
52
53 def validatenode(node):
53 def validatenode(node):
54 if isinstance(node, int):
54 if isinstance(node, int):
55 raise ValueError('expected node; got int')
55 raise ValueError('expected node; got int')
56
56
57 if len(node) != 20:
57 if len(node) != 20:
58 raise ValueError('expected 20 byte node')
58 raise ValueError('expected 20 byte node')
59
59
60 def validaterev(rev):
60 def validaterev(rev):
61 if not isinstance(rev, int):
61 if not isinstance(rev, int):
62 raise ValueError('expected int')
62 raise ValueError('expected int')
63
63
64 class simplestoreerror(error.StorageError):
64 class simplestoreerror(error.StorageError):
65 pass
65 pass
66
66
67 @interfaceutil.implementer(repository.irevisiondelta)
67 @interfaceutil.implementer(repository.irevisiondelta)
68 @attr.s(slots=True, frozen=True)
68 @attr.s(slots=True, frozen=True)
69 class simplestorerevisiondelta(object):
69 class simplestorerevisiondelta(object):
70 node = attr.ib()
70 node = attr.ib()
71 p1node = attr.ib()
71 p1node = attr.ib()
72 p2node = attr.ib()
72 p2node = attr.ib()
73 basenode = attr.ib()
73 basenode = attr.ib()
74 linknode = attr.ib()
74 linknode = attr.ib()
75 flags = attr.ib()
75 flags = attr.ib()
76 baserevisionsize = attr.ib()
76 baserevisionsize = attr.ib()
77 revision = attr.ib()
77 revision = attr.ib()
78 delta = attr.ib()
78 delta = attr.ib()
79
79
80 @interfaceutil.implementer(repository.ifilestorage)
80 @interfaceutil.implementer(repository.ifilestorage)
81 class filestorage(object):
81 class filestorage(object):
82 """Implements storage for a tracked path.
82 """Implements storage for a tracked path.
83
83
84 Data is stored in the VFS in a directory corresponding to the tracked
84 Data is stored in the VFS in a directory corresponding to the tracked
85 path.
85 path.
86
86
87 Index data is stored in an ``index`` file using CBOR.
87 Index data is stored in an ``index`` file using CBOR.
88
88
89 Fulltext data is stored in files having names of the node.
89 Fulltext data is stored in files having names of the node.
90 """
90 """
91
91
92 def __init__(self, svfs, path):
92 def __init__(self, svfs, path):
93 self._svfs = svfs
93 self._svfs = svfs
94 self._path = path
94 self._path = path
95
95
96 self._storepath = b'/'.join([b'data', path])
96 self._storepath = b'/'.join([b'data', path])
97 self._indexpath = b'/'.join([self._storepath, b'index'])
97 self._indexpath = b'/'.join([self._storepath, b'index'])
98
98
99 indexdata = self._svfs.tryread(self._indexpath)
99 indexdata = self._svfs.tryread(self._indexpath)
100 if indexdata:
100 if indexdata:
101 indexdata = cbor.loads(indexdata)
101 indexdata = cbor.loads(indexdata)
102
102
103 self._indexdata = indexdata or []
103 self._indexdata = indexdata or []
104 self._indexbynode = {}
104 self._indexbynode = {}
105 self._indexbyrev = {}
105 self._indexbyrev = {}
106 self.index = []
106 self.index = []
107 self._refreshindex()
107 self._refreshindex()
108
108
109 # This is used by changegroup code :/
109 # This is used by changegroup code :/
110 self._generaldelta = True
110 self._generaldelta = True
111
111
112 self.version = 1
113
114 def _refreshindex(self):
112 def _refreshindex(self):
115 self._indexbynode.clear()
113 self._indexbynode.clear()
116 self._indexbyrev.clear()
114 self._indexbyrev.clear()
117 self.index = []
115 self.index = []
118
116
119 for i, entry in enumerate(self._indexdata):
117 for i, entry in enumerate(self._indexdata):
120 self._indexbynode[entry[b'node']] = entry
118 self._indexbynode[entry[b'node']] = entry
121 self._indexbyrev[i] = entry
119 self._indexbyrev[i] = entry
122
120
123 self._indexbynode[nullid] = {
121 self._indexbynode[nullid] = {
124 b'node': nullid,
122 b'node': nullid,
125 b'p1': nullid,
123 b'p1': nullid,
126 b'p2': nullid,
124 b'p2': nullid,
127 b'linkrev': nullrev,
125 b'linkrev': nullrev,
128 b'flags': 0,
126 b'flags': 0,
129 }
127 }
130
128
131 self._indexbyrev[nullrev] = {
129 self._indexbyrev[nullrev] = {
132 b'node': nullid,
130 b'node': nullid,
133 b'p1': nullid,
131 b'p1': nullid,
134 b'p2': nullid,
132 b'p2': nullid,
135 b'linkrev': nullrev,
133 b'linkrev': nullrev,
136 b'flags': 0,
134 b'flags': 0,
137 }
135 }
138
136
139 for i, entry in enumerate(self._indexdata):
137 for i, entry in enumerate(self._indexdata):
140 p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))
138 p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))
141
139
142 # start, length, rawsize, chainbase, linkrev, p1, p2, node
140 # start, length, rawsize, chainbase, linkrev, p1, p2, node
143 self.index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
141 self.index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
144 entry[b'node']))
142 entry[b'node']))
145
143
146 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
144 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
147
145
148 def __len__(self):
146 def __len__(self):
149 return len(self._indexdata)
147 return len(self._indexdata)
150
148
151 def __iter__(self):
149 def __iter__(self):
152 return iter(range(len(self)))
150 return iter(range(len(self)))
153
151
154 def revs(self, start=0, stop=None):
152 def revs(self, start=0, stop=None):
155 step = 1
153 step = 1
156 if stop is not None:
154 if stop is not None:
157 if start > stop:
155 if start > stop:
158 step = -1
156 step = -1
159
157
160 stop += step
158 stop += step
161 else:
159 else:
162 stop = len(self)
160 stop = len(self)
163
161
164 return range(start, stop, step)
162 return range(start, stop, step)
165
163
166 def parents(self, node):
164 def parents(self, node):
167 validatenode(node)
165 validatenode(node)
168
166
169 if node not in self._indexbynode:
167 if node not in self._indexbynode:
170 raise KeyError('unknown node')
168 raise KeyError('unknown node')
171
169
172 entry = self._indexbynode[node]
170 entry = self._indexbynode[node]
173
171
174 return entry[b'p1'], entry[b'p2']
172 return entry[b'p1'], entry[b'p2']
175
173
176 def parentrevs(self, rev):
174 def parentrevs(self, rev):
177 p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
175 p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
178 return self.rev(p1), self.rev(p2)
176 return self.rev(p1), self.rev(p2)
179
177
180 def rev(self, node):
178 def rev(self, node):
181 validatenode(node)
179 validatenode(node)
182
180
183 try:
181 try:
184 self._indexbynode[node]
182 self._indexbynode[node]
185 except KeyError:
183 except KeyError:
186 raise error.LookupError(node, self._indexpath, _('no node'))
184 raise error.LookupError(node, self._indexpath, _('no node'))
187
185
188 for rev, entry in self._indexbyrev.items():
186 for rev, entry in self._indexbyrev.items():
189 if entry[b'node'] == node:
187 if entry[b'node'] == node:
190 return rev
188 return rev
191
189
192 raise error.ProgrammingError('this should not occur')
190 raise error.ProgrammingError('this should not occur')
193
191
194 def node(self, rev):
192 def node(self, rev):
195 validaterev(rev)
193 validaterev(rev)
196
194
197 return self._indexbyrev[rev][b'node']
195 return self._indexbyrev[rev][b'node']
198
196
    def lookup(self, node):
        """Resolve *node* -- an int rev, a 20-byte binary node, a decimal
        rev string, or a 40-char hex node -- to a binary node.

        Raises error.LookupError when nothing matches.
        """
        if isinstance(node, int):
            return self.node(node)

        if len(node) == 20:
            # Looks like a binary node; self.rev() validates it exists.
            self.rev(node)
            return node

        try:
            rev = int(node)
            # Reject strings like '01' or '+1' that int() accepts but
            # are not canonical revision spellings.
            if '%d' % rev != node:
                raise ValueError

            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError

            return self.node(rev)
        except (ValueError, OverflowError):
            pass

        if len(node) == 40:
            try:
                rawnode = bin(node)
                self.rev(rawnode)
                return rawnode
            except TypeError:
                # Not valid hex; fall through to the failure below.
                pass

        raise error.LookupError(node, self._path, _('invalid lookup input'))
230
228
231 def linkrev(self, rev):
229 def linkrev(self, rev):
232 validaterev(rev)
230 validaterev(rev)
233
231
234 return self._indexbyrev[rev][b'linkrev']
232 return self._indexbyrev[rev][b'linkrev']
235
233
236 def flags(self, rev):
234 def flags(self, rev):
237 validaterev(rev)
235 validaterev(rev)
238
236
239 return self._indexbyrev[rev][b'flags']
237 return self._indexbyrev[rev][b'flags']
240
238
    def deltaparent(self, rev):
        """Return the revision used as delta parent: always the rev of p1."""
        validaterev(rev)

        p1node = self.parents(self.node(rev))[0]
        return self.rev(p1node)
246
244
    def _candelta(self, baserev, rev):
        """Report whether a delta may be produced between two revisions.

        Deltas are refused when either revision carries a flag that can
        alter its raw text (revlog.REVIDX_RAWTEXT_CHANGING_FLAGS), since
        a delta would then not reproduce the stored bytes.
        """
        validaterev(baserev)
        validaterev(rev)

        if ((self.flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
            or (self.flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
            return False

        return True
256
254
    def rawsize(self, rev):
        """Return the length of the raw (untransformed) revision text."""
        validaterev(rev)
        node = self.node(rev)
        return len(self.revision(node, raw=True))
261
259
    def _processflags(self, text, flags, operation, raw=False):
        """Run registered flag processors over *text*.

        *operation* is 'read' or 'write'. Returns a (text, validatehash)
        pair; validatehash is False when any processor declares the
        result unverifiable by node hash. With raw=True only the raw
        transforms run and text is returned unmodified.

        Raises simplestoreerror on unknown flags or missing processors.
        """
        if flags == 0:
            return text, True

        if flags & ~revlog.REVIDX_KNOWN_FLAGS:
            raise simplestoreerror(_("incompatible revision flag '%#x'") %
                                   (flags & ~revlog.REVIDX_KNOWN_FLAGS))

        validatehash = True
        # Depending on the operation (read or write), the order might be
        # reversed due to non-commutative transforms.
        orderedflags = revlog.REVIDX_FLAGS_ORDER
        if operation == 'write':
            orderedflags = reversed(orderedflags)

        for flag in orderedflags:
            # If a flagprocessor has been registered for a known flag, apply
            # the related operation transform and update the result tuple.
            if flag & flags:
                vhash = True

                if flag not in revlog._flagprocessors:
                    message = _("missing processor for flag '%#x'") % (flag)
                    raise simplestoreerror(message)

                processor = revlog._flagprocessors[flag]
                if processor is not None:
                    readtransform, writetransform, rawtransform = processor

                    if raw:
                        vhash = rawtransform(self, text)
                    elif operation == 'read':
                        text, vhash = readtransform(self, text)
                    else: # write operation
                        text, vhash = writetransform(self, text)
                # A single failing processor poisons hash validation.
                validatehash = validatehash and vhash

        return text, validatehash
300
298
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Verify that *text* hashes to *node* given its parents.

        Parents are looked up from storage when not supplied. *rev* is
        accepted for API compatibility but unused here. Raises
        simplestoreerror on mismatch.
        """
        if p1 is None and p2 is None:
            p1, p2 = self.parents(node)
        if node != revlog.hash(text, p1, p2):
            raise simplestoreerror(_("integrity check failed on %s") %
                                   self._path)
307
305
    def revision(self, node, raw=False):
        """Return the fulltext of *node*.

        With raw=True the stored rawtext is returned without read
        transforms applied. The null node yields an empty bytestring.
        """
        validatenode(node)

        if node == nullid:
            return b''

        rev = self.rev(node)
        flags = self.flags(rev)

        # Each revision's rawtext lives in its own file named by hex node.
        path = b'/'.join([self._storepath, hex(node)])
        rawtext = self._svfs.read(path)

        text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)

        return text
325
323
    def read(self, node):
        """Return the file data for *node* with copy metadata stripped.

        Metadata, when present, is bracketed by '\\1\\n' markers at the
        start of the revision text.
        """
        validatenode(node)

        revision = self.revision(node)

        if not revision.startswith(b'\1\n'):
            return revision

        # Skip past the closing metadata marker.
        start = revision.index(b'\1\n', 2)
        return revision[start + 2:]
336
334
    def renamed(self, node):
        """Return (copysource, copynode) if *node* records a copy, else False.

        Copy metadata is only possible on revisions whose first parent
        is the null node.
        """
        validatenode(node)

        if self.parents(node)[0] != nullid:
            return False

        fulltext = self.revision(node)
        m = revlog.parsemeta(fulltext)[0]

        if m and 'copy' in m:
            return m['copy'], bin(m['copyrev'])

        return False
350
348
    def cmp(self, node, text):
        """Return True if *text* DIFFERS from the revision stored at *node*.

        Note the inverted sense relative to an equality test: False
        means identical.
        """
        validatenode(node)

        t = text

        if text.startswith(b'\1\n'):
            # Prefix an empty metadata block so hashing matches how such
            # text would have been stored.
            t = b'\1\n\1\n' + text

        p1, p2 = self.parents(node)

        if revlog.hash(t, p1, p2) == node:
            return False

        if self.iscensored(self.rev(node)):
            # A censored revision only "matches" the empty string.
            return text != b''

        if self.renamed(node):
            # Compare against the data with copy metadata stripped.
            t2 = self.read(node)
            return t2 != text

        return True
372
370
    def size(self, rev):
        """Return the size of the file data at *rev*.

        Renames require resolving metadata first; censored revisions
        report size 0.
        """
        validaterev(rev)

        node = self._indexbyrev[rev][b'node']

        if self.renamed(node):
            return len(self.read(node))

        if self.iscensored(rev):
            return 0

        return len(self.revision(node))
385
383
386 def iscensored(self, rev):
384 def iscensored(self, rev):
387 validaterev(rev)
385 validaterev(rev)
388
386
389 return self.flags(rev) & revlog.REVIDX_ISCENSORED
387 return self.flags(rev) & revlog.REVIDX_ISCENSORED
390
388
    def commonancestorsheads(self, a, b):
        """Return the heads of the common ancestors of nodes *a* and *b*."""
        validatenode(a)
        validatenode(b)

        a = self.rev(a)
        b = self.rev(b)

        # Delegate the graph walk to the shared ancestor algorithm, then
        # translate the resulting revs back to nodes.
        ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
        return pycompat.maplist(self.node, ancestors)
400
398
    def descendants(self, revs):
        """Yield the revision numbers of all descendants of *revs*."""
        # This is a copy of revlog.descendants()
        first = min(revs)
        if first == nullrev:
            # Everything descends from the null revision.
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in self.revs(start=first + 1):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break
416
414
    # Required by verify.
    def files(self):
        """Return store-relative paths of all files backing this storage."""
        entries = self._svfs.listdir(self._storepath)

        # Strip out undo.backup.* files created as part of transaction
        # recording.
        entries = [f for f in entries if not f.startswith('undo.backup.')]

        return [b'/'.join((self._storepath, f)) for f in entries]
426
424
    # Required by verify.
    def checksize(self):
        """Return (data excess, index excess); this store has neither."""
        return 0, 0
430
428
    def add(self, text, meta, transaction, linkrev, p1, p2):
        """Add a revision, packing copy *meta* into the text when present.

        Text beginning with the '\\1\\n' marker must also be packed so it
        is not misread as metadata later.
        """
        if meta or text.startswith(b'\1\n'):
            text = revlog.packmeta(meta, text)

        return self.addrevision(text, transaction, linkrev, p1, p2)
436
434
    def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
                    flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
        """Add a revision of *text* and return its node.

        Write flag transforms are applied before storage. Adding an
        already-known node is a no-op that returns the node.
        *cachedelta* is accepted for API compatibility but unused.
        """
        validatenode(p1)
        validatenode(p2)

        # When flags are set, the node must be computed from the
        # pre-transform text, before _processflags rewrites it.
        if flags:
            node = node or revlog.hash(text, p1, p2)

        rawtext, validatehash = self._processflags(text, flags, 'write')

        node = node or revlog.hash(text, p1, p2)

        if node in self._indexbynode:
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
                                    flags)
457
455
    def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
        """Write *rawtext* to the store and append an index entry.

        Registers an index backup with the transaction so the addition
        can be rolled back.
        """
        transaction.addbackup(self._indexpath)

        # Each revision is stored in its own file named by hex node.
        path = b'/'.join([self._storepath, hex(node)])

        self._svfs.write(path, rawtext)

        self._indexdata.append({
            b'node': node,
            b'p1': p1,
            b'p2': p2,
            b'linkrev': link,
            b'flags': flags,
        })

        self._reflectindexupdate()

        return node
476
474
    def _reflectindexupdate(self):
        """Refresh derived index state and persist the index as CBOR."""
        self._refreshindex()
        self._svfs.write(self._indexpath, cbor.dumps(self._indexdata))
480
478
    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        """Apply a group of incoming deltas; return the list of nodes seen.

        Each delta is resolved to a fulltext against its base before
        being stored. Already-known nodes are skipped but still
        reported in the returned list. *addrevisioncb*, when given, is
        invoked for every newly stored node.
        """
        nodes = []

        transaction.addbackup(self._indexpath)

        for node, p1, p2, linknode, deltabase, delta, flags in deltas:
            linkrev = linkmapper(linknode)
            flags = flags or revlog.REVIDX_DEFAULT_FLAGS

            nodes.append(node)

            if node in self._indexbynode:
                continue

            # Need to resolve the fulltext from the delta base.
            if deltabase == nullid:
                text = mdiff.patch(b'', delta)
            else:
                text = mdiff.patch(self.revision(deltabase), delta)

            self._addrawrevision(node, text, transaction, linkrev, p1, p2,
                                 flags)

            if addrevisioncb:
                addrevisioncb(self, node)

        return nodes
508
506
    def revdiff(self, rev1, rev2):
        """Return a binary delta turning rev1's rawtext into rev2's."""
        validaterev(rev1)
        validaterev(rev2)

        node1 = self.node(rev1)
        node2 = self.node(rev2)

        return mdiff.textdiff(self.revision(node1, raw=True),
                              self.revision(node2, raw=True))
518
516
    def emitrevisiondeltas(self, requests):
        """Yield simplestorerevisiondelta objects for *requests*.

        A request basenode of nullid forces a fulltext; None lets us
        choose the delta parent ourselves. Revisions involving censored
        data are always emitted as fulltexts (possibly a tombstone).
        """
        for request in requests:
            node = request.node
            rev = self.rev(node)

            if request.basenode == nullid:
                baserev = nullrev
            elif request.basenode is not None:
                baserev = self.rev(request.basenode)
            else:
                # This is a test extension and we can do simple things
                # for choosing a delta parent.
                baserev = self.deltaparent(rev)

            # Fall back to a fulltext when a delta against the chosen
            # base is not allowed.
            if baserev != nullrev and not self._candelta(baserev, rev):
                baserev = nullrev

            revision = None
            delta = None
            baserevisionsize = None

            if self.iscensored(baserev) or self.iscensored(rev):
                try:
                    revision = self.revision(node, raw=True)
                except error.CensoredNodeError as e:
                    # Ship the tombstone text in place of real content.
                    revision = e.tombstone

                if baserev != nullrev:
                    baserevisionsize = self.rawsize(baserev)

            elif baserev == nullrev:
                revision = self.revision(node, raw=True)
            else:
                delta = self.revdiff(baserev, rev)

            extraflags = revlog.REVIDX_ELLIPSIS if request.ellipsis else 0

            yield simplestorerevisiondelta(
                node=node,
                p1node=request.p1node,
                p2node=request.p2node,
                linknode=request.linknode,
                basenode=self.node(baserev),
                flags=self.flags(rev) | extraflags,
                baserevisionsize=baserevisionsize,
                revision=revision,
                delta=delta)
566
564
    def heads(self, start=None, stop=None):
        """Return head nodes, optionally limited to descendants of *start*
        and not walking past any node in *stop*.
        """
        # This is copied from revlog.py.
        if start is None and stop is None:
            if not len(self):
                # Empty storage: the null node is the only head.
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = {startrev}
        heads = {startrev}

        parentrevs = self.parentrevs
        for r in self.revs(start=startrev + 1):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                    # A parent with a reachable child is not a head.
                    if p in heads and p not in stoprevs:
                        heads.remove(p)

        return [self.node(r) for r in heads]
594
592
    def children(self, node):
        """Return the nodes that list *node* among their parents."""
        validatenode(node)

        # This is a copy of revlog.children().
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                # Parentless revisions are children of the null rev.
                c.append(self.node(r))
        return c
610
608
611 def getstrippoint(self, minlink):
609 def getstrippoint(self, minlink):
612
610
613 # This is largely a copy of revlog.getstrippoint().
611 # This is largely a copy of revlog.getstrippoint().
614 brokenrevs = set()
612 brokenrevs = set()
615 strippoint = len(self)
613 strippoint = len(self)
616
614
617 heads = {}
615 heads = {}
618 futurelargelinkrevs = set()
616 futurelargelinkrevs = set()
619 for head in self.heads():
617 for head in self.heads():
620 headlinkrev = self.linkrev(self.rev(head))
618 headlinkrev = self.linkrev(self.rev(head))
621 heads[head] = headlinkrev
619 heads[head] = headlinkrev
622 if headlinkrev >= minlink:
620 if headlinkrev >= minlink:
623 futurelargelinkrevs.add(headlinkrev)
621 futurelargelinkrevs.add(headlinkrev)
624
622
625 # This algorithm involves walking down the rev graph, starting at the
623 # This algorithm involves walking down the rev graph, starting at the
626 # heads. Since the revs are topologically sorted according to linkrev,
624 # heads. Since the revs are topologically sorted according to linkrev,
627 # once all head linkrevs are below the minlink, we know there are
625 # once all head linkrevs are below the minlink, we know there are
628 # no more revs that could have a linkrev greater than minlink.
626 # no more revs that could have a linkrev greater than minlink.
629 # So we can stop walking.
627 # So we can stop walking.
630 while futurelargelinkrevs:
628 while futurelargelinkrevs:
631 strippoint -= 1
629 strippoint -= 1
632 linkrev = heads.pop(strippoint)
630 linkrev = heads.pop(strippoint)
633
631
634 if linkrev < minlink:
632 if linkrev < minlink:
635 brokenrevs.add(strippoint)
633 brokenrevs.add(strippoint)
636 else:
634 else:
637 futurelargelinkrevs.remove(linkrev)
635 futurelargelinkrevs.remove(linkrev)
638
636
639 for p in self.parentrevs(strippoint):
637 for p in self.parentrevs(strippoint):
640 if p != nullrev:
638 if p != nullrev:
641 plinkrev = self.linkrev(p)
639 plinkrev = self.linkrev(p)
642 heads[p] = plinkrev
640 heads[p] = plinkrev
643 if plinkrev >= minlink:
641 if plinkrev >= minlink:
644 futurelargelinkrevs.add(plinkrev)
642 futurelargelinkrevs.add(plinkrev)
645
643
646 return strippoint, brokenrevs
644 return strippoint, brokenrevs
647
645
    def strip(self, minlink, transaction):
        """Remove revisions whose linkrev is >= *minlink*."""
        if not len(self):
            return

        rev, _ignored = self.getstrippoint(minlink)
        if rev == len(self):
            # Nothing above the strip point; nothing to do.
            return

        # Purge index data starting at the requested revision.
        self._indexdata[rev:] = []
        self._reflectindexupdate()
659
657
def issimplestorefile(f, kind, st):
    """Store-walk filter: is *f* a data file owned by the simple store?"""
    if kind != stat.S_IFREG:
        return False

    # Revlog files belong to the base store, not us.
    if store.isrevlog(f, kind, st):
        return False

    # Ignore transaction undo files.
    if f.startswith('undo.'):
        return False

    # Otherwise assume it belongs to the simple store.
    return True
673
671
class simplestore(store.encodedstore):
    """encodedstore variant that also reports simple-store data files."""

    def datafiles(self):
        # First yield whatever the base store knows about.
        for x in super(simplestore, self).datafiles():
            yield x

        # Supplement with non-revlog files.
        extrafiles = self._walk('data', True, filefilter=issimplestorefile)

        for unencoded, encoded, size in extrafiles:
            try:
                unencoded = store.decodefilename(unencoded)
            except KeyError:
                # Name is not decodable; report only the encoded form.
                unencoded = None

            yield unencoded, encoded, size
689
687
def reposetup(ui, repo):
    """Swap the repo's file() storage factory for the simple store's."""
    if not repo.local():
        return

    if isinstance(repo, bundlerepo.bundlerepository):
        raise error.Abort(_('cannot use simple store with bundlerepo'))

    class simplestorerepo(repo.__class__):
        def file(self, f):
            return filestorage(self.svfs, f)

    repo.__class__ = simplestorerepo
702
700
def featuresetup(ui, supported):
    """Advertise the simple-store repository requirement as supported."""
    supported.add(REQUIREMENT)
705
703
def newreporequirements(orig, ui):
    """Modifies default requirements for new repos to use the simple store.

    Wraps localrepo.newreporequirements(); adds REQUIREMENT to whatever
    the wrapped function returns.
    """
    requirements = orig(ui)

    # These requirements are only used to affect creation of the store
    # object. We have our own store. So we can remove them.
    # TODO do this once we feel like taking the test hit.
    #if 'fncache' in requirements:
    #    requirements.remove('fncache')
    #if 'dotencode' in requirements:
    #    requirements.remove('dotencode')

    requirements.add(REQUIREMENT)

    return requirements
721
719
def makestore(orig, requirements, path, vfstype):
    """Wrapped store.store(): return a simplestore when required.

    Falls through to the original factory for repos without our
    requirement.
    """
    if REQUIREMENT not in requirements:
        return orig(requirements, path, vfstype)

    return simplestore(path, vfstype)
727
725
def verifierinit(orig, self, *args, **kwargs):
    """Wrapped verify.verifier.__init__ that mutes orphan-file warnings."""
    orig(self, *args, **kwargs)

    # We don't care that files in the store don't align with what is
    # advertised. So suppress these warnings.
    self.warnorphanstorefiles = False
734
732
def extsetup(ui):
    """Install the extension's wrappers and feature-setup hook."""
    localrepo.featuresetupfuncs.add(featuresetup)

    extensions.wrapfunction(localrepo, 'newreporequirements',
                            newreporequirements)
    extensions.wrapfunction(store, 'store', makestore)
    extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
General Comments 0
You need to be logged in to leave comments. Login now