##// END OF EJS Templates
filelog: stop proxying headrevs() (API)...
Gregory Szorc -
r39821:979e9f12 default
parent child Browse files
Show More
@@ -1,278 +1,274 @@
1 # filelog.py - file history class for mercurial
1 # filelog.py - file history class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from . import (
10 from . import (
11 error,
11 error,
12 repository,
12 repository,
13 revlog,
13 revlog,
14 )
14 )
15 from .utils import (
15 from .utils import (
16 interfaceutil,
16 interfaceutil,
17 )
17 )
18
18
19 @interfaceutil.implementer(repository.ifilestorage)
19 @interfaceutil.implementer(repository.ifilestorage)
20 class filelog(object):
20 class filelog(object):
21 def __init__(self, opener, path):
21 def __init__(self, opener, path):
22 self._revlog = revlog.revlog(opener,
22 self._revlog = revlog.revlog(opener,
23 '/'.join(('data', path + '.i')),
23 '/'.join(('data', path + '.i')),
24 censorable=True)
24 censorable=True)
25 # Full name of the user visible file, relative to the repository root.
25 # Full name of the user visible file, relative to the repository root.
26 # Used by LFS.
26 # Used by LFS.
27 self.filename = path
27 self.filename = path
28 # Used by repo upgrade.
28 # Used by repo upgrade.
29 self.index = self._revlog.index
29 self.index = self._revlog.index
30 # Used by verify.
30 # Used by verify.
31 self.version = self._revlog.version
31 self.version = self._revlog.version
32 # Used by changegroup generation.
32 # Used by changegroup generation.
33 self._generaldelta = self._revlog._generaldelta
33 self._generaldelta = self._revlog._generaldelta
34
34
35 def __len__(self):
35 def __len__(self):
36 return len(self._revlog)
36 return len(self._revlog)
37
37
38 def __iter__(self):
38 def __iter__(self):
39 return self._revlog.__iter__()
39 return self._revlog.__iter__()
40
40
41 def revs(self, start=0, stop=None):
41 def revs(self, start=0, stop=None):
42 return self._revlog.revs(start=start, stop=stop)
42 return self._revlog.revs(start=start, stop=stop)
43
43
44 def parents(self, node):
44 def parents(self, node):
45 return self._revlog.parents(node)
45 return self._revlog.parents(node)
46
46
47 def parentrevs(self, rev):
47 def parentrevs(self, rev):
48 return self._revlog.parentrevs(rev)
48 return self._revlog.parentrevs(rev)
49
49
50 def rev(self, node):
50 def rev(self, node):
51 return self._revlog.rev(node)
51 return self._revlog.rev(node)
52
52
53 def node(self, rev):
53 def node(self, rev):
54 return self._revlog.node(rev)
54 return self._revlog.node(rev)
55
55
56 def lookup(self, node):
56 def lookup(self, node):
57 return self._revlog.lookup(node)
57 return self._revlog.lookup(node)
58
58
59 def linkrev(self, rev):
59 def linkrev(self, rev):
60 return self._revlog.linkrev(rev)
60 return self._revlog.linkrev(rev)
61
61
62 # Used by LFS, verify.
62 # Used by LFS, verify.
63 def flags(self, rev):
63 def flags(self, rev):
64 return self._revlog.flags(rev)
64 return self._revlog.flags(rev)
65
65
66 def commonancestorsheads(self, node1, node2):
66 def commonancestorsheads(self, node1, node2):
67 return self._revlog.commonancestorsheads(node1, node2)
67 return self._revlog.commonancestorsheads(node1, node2)
68
68
69 # Used by dagop.blockdescendants().
69 # Used by dagop.blockdescendants().
70 def descendants(self, revs):
70 def descendants(self, revs):
71 return self._revlog.descendants(revs)
71 return self._revlog.descendants(revs)
72
72
73 # Used by hgweb.
74 def headrevs(self):
75 return self._revlog.headrevs()
76
77 def heads(self, start=None, stop=None):
73 def heads(self, start=None, stop=None):
78 return self._revlog.heads(start, stop)
74 return self._revlog.heads(start, stop)
79
75
80 # Used by hgweb, children extension.
76 # Used by hgweb, children extension.
81 def children(self, node):
77 def children(self, node):
82 return self._revlog.children(node)
78 return self._revlog.children(node)
83
79
84 def deltaparent(self, rev):
80 def deltaparent(self, rev):
85 return self._revlog.deltaparent(rev)
81 return self._revlog.deltaparent(rev)
86
82
87 def iscensored(self, rev):
83 def iscensored(self, rev):
88 return self._revlog.iscensored(rev)
84 return self._revlog.iscensored(rev)
89
85
90 # Used by verify.
86 # Used by verify.
91 def rawsize(self, rev):
87 def rawsize(self, rev):
92 return self._revlog.rawsize(rev)
88 return self._revlog.rawsize(rev)
93
89
94 # Might be unused.
90 # Might be unused.
95 def checkhash(self, text, node, p1=None, p2=None, rev=None):
91 def checkhash(self, text, node, p1=None, p2=None, rev=None):
96 return self._revlog.checkhash(text, node, p1=p1, p2=p2, rev=rev)
92 return self._revlog.checkhash(text, node, p1=p1, p2=p2, rev=rev)
97
93
98 def revision(self, node, _df=None, raw=False):
94 def revision(self, node, _df=None, raw=False):
99 return self._revlog.revision(node, _df=_df, raw=raw)
95 return self._revlog.revision(node, _df=_df, raw=raw)
100
96
101 def revdiff(self, rev1, rev2):
97 def revdiff(self, rev1, rev2):
102 return self._revlog.revdiff(rev1, rev2)
98 return self._revlog.revdiff(rev1, rev2)
103
99
104 def emitrevisiondeltas(self, requests):
100 def emitrevisiondeltas(self, requests):
105 return self._revlog.emitrevisiondeltas(requests)
101 return self._revlog.emitrevisiondeltas(requests)
106
102
107 def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
103 def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
108 node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
104 node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
109 cachedelta=None):
105 cachedelta=None):
110 return self._revlog.addrevision(revisiondata, transaction, linkrev,
106 return self._revlog.addrevision(revisiondata, transaction, linkrev,
111 p1, p2, node=node, flags=flags,
107 p1, p2, node=node, flags=flags,
112 cachedelta=cachedelta)
108 cachedelta=cachedelta)
113
109
114 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
110 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
115 return self._revlog.addgroup(deltas, linkmapper, transaction,
111 return self._revlog.addgroup(deltas, linkmapper, transaction,
116 addrevisioncb=addrevisioncb)
112 addrevisioncb=addrevisioncb)
117
113
118 def getstrippoint(self, minlink):
114 def getstrippoint(self, minlink):
119 return self._revlog.getstrippoint(minlink)
115 return self._revlog.getstrippoint(minlink)
120
116
121 def strip(self, minlink, transaction):
117 def strip(self, minlink, transaction):
122 return self._revlog.strip(minlink, transaction)
118 return self._revlog.strip(minlink, transaction)
123
119
124 def censorrevision(self, tr, node, tombstone=b''):
120 def censorrevision(self, tr, node, tombstone=b''):
125 return self._revlog.censorrevision(node, tombstone=tombstone)
121 return self._revlog.censorrevision(node, tombstone=tombstone)
126
122
127 def files(self):
123 def files(self):
128 return self._revlog.files()
124 return self._revlog.files()
129
125
130 # Used by verify.
126 # Used by verify.
131 def checksize(self):
127 def checksize(self):
132 return self._revlog.checksize()
128 return self._revlog.checksize()
133
129
134 def read(self, node):
130 def read(self, node):
135 t = self.revision(node)
131 t = self.revision(node)
136 if not t.startswith('\1\n'):
132 if not t.startswith('\1\n'):
137 return t
133 return t
138 s = t.index('\1\n', 2)
134 s = t.index('\1\n', 2)
139 return t[s + 2:]
135 return t[s + 2:]
140
136
141 def add(self, text, meta, transaction, link, p1=None, p2=None):
137 def add(self, text, meta, transaction, link, p1=None, p2=None):
142 if meta or text.startswith('\1\n'):
138 if meta or text.startswith('\1\n'):
143 text = revlog.packmeta(meta, text)
139 text = revlog.packmeta(meta, text)
144 return self.addrevision(text, transaction, link, p1, p2)
140 return self.addrevision(text, transaction, link, p1, p2)
145
141
146 def renamed(self, node):
142 def renamed(self, node):
147 if self.parents(node)[0] != revlog.nullid:
143 if self.parents(node)[0] != revlog.nullid:
148 return False
144 return False
149 t = self.revision(node)
145 t = self.revision(node)
150 m = revlog.parsemeta(t)[0]
146 m = revlog.parsemeta(t)[0]
151 # copy and copyrev occur in pairs. In rare cases due to bugs,
147 # copy and copyrev occur in pairs. In rare cases due to bugs,
152 # one can occur without the other.
148 # one can occur without the other.
153 if m and "copy" in m and "copyrev" in m:
149 if m and "copy" in m and "copyrev" in m:
154 return (m["copy"], revlog.bin(m["copyrev"]))
150 return (m["copy"], revlog.bin(m["copyrev"]))
155 return False
151 return False
156
152
157 def size(self, rev):
153 def size(self, rev):
158 """return the size of a given revision"""
154 """return the size of a given revision"""
159
155
160 # for revisions with renames, we have to go the slow way
156 # for revisions with renames, we have to go the slow way
161 node = self.node(rev)
157 node = self.node(rev)
162 if self.renamed(node):
158 if self.renamed(node):
163 return len(self.read(node))
159 return len(self.read(node))
164 if self.iscensored(rev):
160 if self.iscensored(rev):
165 return 0
161 return 0
166
162
167 # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
163 # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
168 return self._revlog.size(rev)
164 return self._revlog.size(rev)
169
165
170 def cmp(self, node, text):
166 def cmp(self, node, text):
171 """compare text with a given file revision
167 """compare text with a given file revision
172
168
173 returns True if text is different than what is stored.
169 returns True if text is different than what is stored.
174 """
170 """
175
171
176 t = text
172 t = text
177 if text.startswith('\1\n'):
173 if text.startswith('\1\n'):
178 t = '\1\n\1\n' + text
174 t = '\1\n\1\n' + text
179
175
180 samehashes = not self._revlog.cmp(node, t)
176 samehashes = not self._revlog.cmp(node, t)
181 if samehashes:
177 if samehashes:
182 return False
178 return False
183
179
184 # censored files compare against the empty file
180 # censored files compare against the empty file
185 if self.iscensored(self.rev(node)):
181 if self.iscensored(self.rev(node)):
186 return text != ''
182 return text != ''
187
183
188 # renaming a file produces a different hash, even if the data
184 # renaming a file produces a different hash, even if the data
189 # remains unchanged. Check if it's the case (slow):
185 # remains unchanged. Check if it's the case (slow):
190 if self.renamed(node):
186 if self.renamed(node):
191 t2 = self.read(node)
187 t2 = self.read(node)
192 return t2 != text
188 return t2 != text
193
189
194 return True
190 return True
195
191
196 # TODO these aren't part of the interface and aren't internal methods.
192 # TODO these aren't part of the interface and aren't internal methods.
197 # Callers should be fixed to not use them.
193 # Callers should be fixed to not use them.
198
194
199 # Used by LFS.
195 # Used by LFS.
200 @property
196 @property
201 def filename(self):
197 def filename(self):
202 return self._revlog.filename
198 return self._revlog.filename
203
199
204 @filename.setter
200 @filename.setter
205 def filename(self, value):
201 def filename(self, value):
206 self._revlog.filename = value
202 self._revlog.filename = value
207
203
208 # Used by bundlefilelog, unionfilelog.
204 # Used by bundlefilelog, unionfilelog.
209 @property
205 @property
210 def indexfile(self):
206 def indexfile(self):
211 return self._revlog.indexfile
207 return self._revlog.indexfile
212
208
213 @indexfile.setter
209 @indexfile.setter
214 def indexfile(self, value):
210 def indexfile(self, value):
215 self._revlog.indexfile = value
211 self._revlog.indexfile = value
216
212
217 # Used by LFS, repo upgrade.
213 # Used by LFS, repo upgrade.
218 @property
214 @property
219 def opener(self):
215 def opener(self):
220 return self._revlog.opener
216 return self._revlog.opener
221
217
222 # Used by repo upgrade.
218 # Used by repo upgrade.
223 def clone(self, tr, destrevlog, **kwargs):
219 def clone(self, tr, destrevlog, **kwargs):
224 if not isinstance(destrevlog, filelog):
220 if not isinstance(destrevlog, filelog):
225 raise error.ProgrammingError('expected filelog to clone()')
221 raise error.ProgrammingError('expected filelog to clone()')
226
222
227 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
223 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
228
224
229 class narrowfilelog(filelog):
225 class narrowfilelog(filelog):
230 """Filelog variation to be used with narrow stores."""
226 """Filelog variation to be used with narrow stores."""
231
227
232 def __init__(self, opener, path, narrowmatch):
228 def __init__(self, opener, path, narrowmatch):
233 super(narrowfilelog, self).__init__(opener, path)
229 super(narrowfilelog, self).__init__(opener, path)
234 self._narrowmatch = narrowmatch
230 self._narrowmatch = narrowmatch
235
231
236 def renamed(self, node):
232 def renamed(self, node):
237 res = super(narrowfilelog, self).renamed(node)
233 res = super(narrowfilelog, self).renamed(node)
238
234
239 # Renames that come from outside the narrowspec are problematic
235 # Renames that come from outside the narrowspec are problematic
240 # because we may lack the base text for the rename. This can result
236 # because we may lack the base text for the rename. This can result
241 # in code attempting to walk the ancestry or compute a diff
237 # in code attempting to walk the ancestry or compute a diff
242 # encountering a missing revision. We address this by silently
238 # encountering a missing revision. We address this by silently
243 # removing rename metadata if the source file is outside the
239 # removing rename metadata if the source file is outside the
244 # narrow spec.
240 # narrow spec.
245 #
241 #
246 # A better solution would be to see if the base revision is available,
242 # A better solution would be to see if the base revision is available,
247 # rather than assuming it isn't.
243 # rather than assuming it isn't.
248 #
244 #
249 # An even better solution would be to teach all consumers of rename
245 # An even better solution would be to teach all consumers of rename
250 # metadata that the base revision may not be available.
246 # metadata that the base revision may not be available.
251 #
247 #
252 # TODO consider better ways of doing this.
248 # TODO consider better ways of doing this.
253 if res and not self._narrowmatch(res[0]):
249 if res and not self._narrowmatch(res[0]):
254 return None
250 return None
255
251
256 return res
252 return res
257
253
258 def size(self, rev):
254 def size(self, rev):
259 # Because we have a custom renamed() that may lie, we need to call
255 # Because we have a custom renamed() that may lie, we need to call
260 # the base renamed() to report accurate results.
256 # the base renamed() to report accurate results.
261 node = self.node(rev)
257 node = self.node(rev)
262 if super(narrowfilelog, self).renamed(node):
258 if super(narrowfilelog, self).renamed(node):
263 return len(self.read(node))
259 return len(self.read(node))
264 else:
260 else:
265 return super(narrowfilelog, self).size(rev)
261 return super(narrowfilelog, self).size(rev)
266
262
267 def cmp(self, node, text):
263 def cmp(self, node, text):
268 different = super(narrowfilelog, self).cmp(node, text)
264 different = super(narrowfilelog, self).cmp(node, text)
269
265
270 # Because renamed() may lie, we may get false positives for
266 # Because renamed() may lie, we may get false positives for
271 # different content. Check for this by comparing against the original
267 # different content. Check for this by comparing against the original
272 # renamed() implementation.
268 # renamed() implementation.
273 if different:
269 if different:
274 if super(narrowfilelog, self).renamed(node):
270 if super(narrowfilelog, self).renamed(node):
275 t2 = self.read(node)
271 t2 = self.read(node)
276 return t2 != text
272 return t2 != text
277
273
278 return different
274 return different
@@ -1,1602 +1,1594 @@
1 # repository.py - Interfaces and base classes for repositories and peers.
1 # repository.py - Interfaces and base classes for repositories and peers.
2 #
2 #
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from . import (
11 from . import (
12 error,
12 error,
13 )
13 )
14 from .utils import (
14 from .utils import (
15 interfaceutil,
15 interfaceutil,
16 )
16 )
17
17
18 # When narrowing is finalized and no longer subject to format changes,
18 # When narrowing is finalized and no longer subject to format changes,
19 # we should move this to just "narrow" or similar.
19 # we should move this to just "narrow" or similar.
20 NARROW_REQUIREMENT = 'narrowhg-experimental'
20 NARROW_REQUIREMENT = 'narrowhg-experimental'
21
21
22 class ipeerconnection(interfaceutil.Interface):
22 class ipeerconnection(interfaceutil.Interface):
23 """Represents a "connection" to a repository.
23 """Represents a "connection" to a repository.
24
24
25 This is the base interface for representing a connection to a repository.
25 This is the base interface for representing a connection to a repository.
26 It holds basic properties and methods applicable to all peer types.
26 It holds basic properties and methods applicable to all peer types.
27
27
28 This is not a complete interface definition and should not be used
28 This is not a complete interface definition and should not be used
29 outside of this module.
29 outside of this module.
30 """
30 """
31 ui = interfaceutil.Attribute("""ui.ui instance""")
31 ui = interfaceutil.Attribute("""ui.ui instance""")
32
32
33 def url():
33 def url():
34 """Returns a URL string representing this peer.
34 """Returns a URL string representing this peer.
35
35
36 Currently, implementations expose the raw URL used to construct the
36 Currently, implementations expose the raw URL used to construct the
37 instance. It may contain credentials as part of the URL. The
37 instance. It may contain credentials as part of the URL. The
38 expectations of the value aren't well-defined and this could lead to
38 expectations of the value aren't well-defined and this could lead to
39 data leakage.
39 data leakage.
40
40
41 TODO audit/clean consumers and more clearly define the contents of this
41 TODO audit/clean consumers and more clearly define the contents of this
42 value.
42 value.
43 """
43 """
44
44
45 def local():
45 def local():
46 """Returns a local repository instance.
46 """Returns a local repository instance.
47
47
48 If the peer represents a local repository, returns an object that
48 If the peer represents a local repository, returns an object that
49 can be used to interface with it. Otherwise returns ``None``.
49 can be used to interface with it. Otherwise returns ``None``.
50 """
50 """
51
51
52 def peer():
52 def peer():
53 """Returns an object conforming to this interface.
53 """Returns an object conforming to this interface.
54
54
55 Most implementations will ``return self``.
55 Most implementations will ``return self``.
56 """
56 """
57
57
58 def canpush():
58 def canpush():
59 """Returns a boolean indicating if this peer can be pushed to."""
59 """Returns a boolean indicating if this peer can be pushed to."""
60
60
61 def close():
61 def close():
62 """Close the connection to this peer.
62 """Close the connection to this peer.
63
63
64 This is called when the peer will no longer be used. Resources
64 This is called when the peer will no longer be used. Resources
65 associated with the peer should be cleaned up.
65 associated with the peer should be cleaned up.
66 """
66 """
67
67
68 class ipeercapabilities(interfaceutil.Interface):
68 class ipeercapabilities(interfaceutil.Interface):
69 """Peer sub-interface related to capabilities."""
69 """Peer sub-interface related to capabilities."""
70
70
71 def capable(name):
71 def capable(name):
72 """Determine support for a named capability.
72 """Determine support for a named capability.
73
73
74 Returns ``False`` if capability not supported.
74 Returns ``False`` if capability not supported.
75
75
76 Returns ``True`` if boolean capability is supported. Returns a string
76 Returns ``True`` if boolean capability is supported. Returns a string
77 if capability support is non-boolean.
77 if capability support is non-boolean.
78
78
79 Capability strings may or may not map to wire protocol capabilities.
79 Capability strings may or may not map to wire protocol capabilities.
80 """
80 """
81
81
82 def requirecap(name, purpose):
82 def requirecap(name, purpose):
83 """Require a capability to be present.
83 """Require a capability to be present.
84
84
85 Raises a ``CapabilityError`` if the capability isn't present.
85 Raises a ``CapabilityError`` if the capability isn't present.
86 """
86 """
87
87
88 class ipeercommands(interfaceutil.Interface):
88 class ipeercommands(interfaceutil.Interface):
89 """Client-side interface for communicating over the wire protocol.
89 """Client-side interface for communicating over the wire protocol.
90
90
91 This interface is used as a gateway to the Mercurial wire protocol.
91 This interface is used as a gateway to the Mercurial wire protocol.
92 methods commonly call wire protocol commands of the same name.
92 methods commonly call wire protocol commands of the same name.
93 """
93 """
94
94
95 def branchmap():
95 def branchmap():
96 """Obtain heads in named branches.
96 """Obtain heads in named branches.
97
97
98 Returns a dict mapping branch name to an iterable of nodes that are
98 Returns a dict mapping branch name to an iterable of nodes that are
99 heads on that branch.
99 heads on that branch.
100 """
100 """
101
101
102 def capabilities():
102 def capabilities():
103 """Obtain capabilities of the peer.
103 """Obtain capabilities of the peer.
104
104
105 Returns a set of string capabilities.
105 Returns a set of string capabilities.
106 """
106 """
107
107
108 def clonebundles():
108 def clonebundles():
109 """Obtains the clone bundles manifest for the repo.
109 """Obtains the clone bundles manifest for the repo.
110
110
111 Returns the manifest as unparsed bytes.
111 Returns the manifest as unparsed bytes.
112 """
112 """
113
113
114 def debugwireargs(one, two, three=None, four=None, five=None):
114 def debugwireargs(one, two, three=None, four=None, five=None):
115 """Used to facilitate debugging of arguments passed over the wire."""
115 """Used to facilitate debugging of arguments passed over the wire."""
116
116
117 def getbundle(source, **kwargs):
117 def getbundle(source, **kwargs):
118 """Obtain remote repository data as a bundle.
118 """Obtain remote repository data as a bundle.
119
119
120 This command is how the bulk of repository data is transferred from
120 This command is how the bulk of repository data is transferred from
121 the peer to the local repository
121 the peer to the local repository
122
122
123 Returns a generator of bundle data.
123 Returns a generator of bundle data.
124 """
124 """
125
125
126 def heads():
126 def heads():
127 """Determine all known head revisions in the peer.
127 """Determine all known head revisions in the peer.
128
128
129 Returns an iterable of binary nodes.
129 Returns an iterable of binary nodes.
130 """
130 """
131
131
132 def known(nodes):
132 def known(nodes):
133 """Determine whether multiple nodes are known.
133 """Determine whether multiple nodes are known.
134
134
135 Accepts an iterable of nodes whose presence to check for.
135 Accepts an iterable of nodes whose presence to check for.
136
136
137 Returns an iterable of booleans indicating of the corresponding node
137 Returns an iterable of booleans indicating of the corresponding node
138 at that index is known to the peer.
138 at that index is known to the peer.
139 """
139 """
140
140
141 def listkeys(namespace):
141 def listkeys(namespace):
142 """Obtain all keys in a pushkey namespace.
142 """Obtain all keys in a pushkey namespace.
143
143
144 Returns an iterable of key names.
144 Returns an iterable of key names.
145 """
145 """
146
146
147 def lookup(key):
147 def lookup(key):
148 """Resolve a value to a known revision.
148 """Resolve a value to a known revision.
149
149
150 Returns a binary node of the resolved revision on success.
150 Returns a binary node of the resolved revision on success.
151 """
151 """
152
152
153 def pushkey(namespace, key, old, new):
153 def pushkey(namespace, key, old, new):
154 """Set a value using the ``pushkey`` protocol.
154 """Set a value using the ``pushkey`` protocol.
155
155
156 Arguments correspond to the pushkey namespace and key to operate on and
156 Arguments correspond to the pushkey namespace and key to operate on and
157 the old and new values for that key.
157 the old and new values for that key.
158
158
159 Returns a string with the peer result. The value inside varies by the
159 Returns a string with the peer result. The value inside varies by the
160 namespace.
160 namespace.
161 """
161 """
162
162
163 def stream_out():
163 def stream_out():
164 """Obtain streaming clone data.
164 """Obtain streaming clone data.
165
165
166 Successful result should be a generator of data chunks.
166 Successful result should be a generator of data chunks.
167 """
167 """
168
168
169 def unbundle(bundle, heads, url):
169 def unbundle(bundle, heads, url):
170 """Transfer repository data to the peer.
170 """Transfer repository data to the peer.
171
171
172 This is how the bulk of data during a push is transferred.
172 This is how the bulk of data during a push is transferred.
173
173
174 Returns the integer number of heads added to the peer.
174 Returns the integer number of heads added to the peer.
175 """
175 """
176
176
177 class ipeerlegacycommands(interfaceutil.Interface):
177 class ipeerlegacycommands(interfaceutil.Interface):
178 """Interface for implementing support for legacy wire protocol commands.
178 """Interface for implementing support for legacy wire protocol commands.
179
179
180 Wire protocol commands transition to legacy status when they are no longer
180 Wire protocol commands transition to legacy status when they are no longer
181 used by modern clients. To facilitate identifying which commands are
181 used by modern clients. To facilitate identifying which commands are
182 legacy, the interfaces are split.
182 legacy, the interfaces are split.
183 """
183 """
184
184
185 def between(pairs):
185 def between(pairs):
186 """Obtain nodes between pairs of nodes.
186 """Obtain nodes between pairs of nodes.
187
187
188 ``pairs`` is an iterable of node pairs.
188 ``pairs`` is an iterable of node pairs.
189
189
190 Returns an iterable of iterables of nodes corresponding to each
190 Returns an iterable of iterables of nodes corresponding to each
191 requested pair.
191 requested pair.
192 """
192 """
193
193
194 def branches(nodes):
194 def branches(nodes):
195 """Obtain ancestor changesets of specific nodes back to a branch point.
195 """Obtain ancestor changesets of specific nodes back to a branch point.
196
196
197 For each requested node, the peer finds the first ancestor node that is
197 For each requested node, the peer finds the first ancestor node that is
198 a DAG root or is a merge.
198 a DAG root or is a merge.
199
199
200 Returns an iterable of iterables with the resolved values for each node.
200 Returns an iterable of iterables with the resolved values for each node.
201 """
201 """
202
202
203 def changegroup(nodes, source):
203 def changegroup(nodes, source):
204 """Obtain a changegroup with data for descendants of specified nodes."""
204 """Obtain a changegroup with data for descendants of specified nodes."""
205
205
206 def changegroupsubset(bases, heads, source):
206 def changegroupsubset(bases, heads, source):
207 pass
207 pass
208
208
209 class ipeercommandexecutor(interfaceutil.Interface):
209 class ipeercommandexecutor(interfaceutil.Interface):
210 """Represents a mechanism to execute remote commands.
210 """Represents a mechanism to execute remote commands.
211
211
212 This is the primary interface for requesting that wire protocol commands
212 This is the primary interface for requesting that wire protocol commands
213 be executed. Instances of this interface are active in a context manager
213 be executed. Instances of this interface are active in a context manager
214 and have a well-defined lifetime. When the context manager exits, all
214 and have a well-defined lifetime. When the context manager exits, all
215 outstanding requests are waited on.
215 outstanding requests are waited on.
216 """
216 """
217
217
218 def callcommand(name, args):
218 def callcommand(name, args):
219 """Request that a named command be executed.
219 """Request that a named command be executed.
220
220
221 Receives the command name and a dictionary of command arguments.
221 Receives the command name and a dictionary of command arguments.
222
222
223 Returns a ``concurrent.futures.Future`` that will resolve to the
223 Returns a ``concurrent.futures.Future`` that will resolve to the
224 result of that command request. That exact value is left up to
224 result of that command request. That exact value is left up to
225 the implementation and possibly varies by command.
225 the implementation and possibly varies by command.
226
226
227 Not all commands can coexist with other commands in an executor
227 Not all commands can coexist with other commands in an executor
228 instance: it depends on the underlying wire protocol transport being
228 instance: it depends on the underlying wire protocol transport being
229 used and the command itself.
229 used and the command itself.
230
230
231 Implementations MAY call ``sendcommands()`` automatically if the
231 Implementations MAY call ``sendcommands()`` automatically if the
232 requested command can not coexist with other commands in this executor.
232 requested command can not coexist with other commands in this executor.
233
233
234 Implementations MAY call ``sendcommands()`` automatically when the
234 Implementations MAY call ``sendcommands()`` automatically when the
235 future's ``result()`` is called. So, consumers using multiple
235 future's ``result()`` is called. So, consumers using multiple
236 commands with an executor MUST ensure that ``result()`` is not called
236 commands with an executor MUST ensure that ``result()`` is not called
237 until all command requests have been issued.
237 until all command requests have been issued.
238 """
238 """
239
239
240 def sendcommands():
240 def sendcommands():
241 """Trigger submission of queued command requests.
241 """Trigger submission of queued command requests.
242
242
243 Not all transports submit commands as soon as they are requested to
243 Not all transports submit commands as soon as they are requested to
244 run. When called, this method forces queued command requests to be
244 run. When called, this method forces queued command requests to be
245 issued. It will no-op if all commands have already been sent.
245 issued. It will no-op if all commands have already been sent.
246
246
247 When called, no more new commands may be issued with this executor.
247 When called, no more new commands may be issued with this executor.
248 """
248 """
249
249
250 def close():
250 def close():
251 """Signal that this command request is finished.
251 """Signal that this command request is finished.
252
252
253 When called, no more new commands may be issued. All outstanding
253 When called, no more new commands may be issued. All outstanding
254 commands that have previously been issued are waited on before
254 commands that have previously been issued are waited on before
255 returning. This not only includes waiting for the futures to resolve,
255 returning. This not only includes waiting for the futures to resolve,
256 but also waiting for all response data to arrive. In other words,
256 but also waiting for all response data to arrive. In other words,
257 calling this waits for all on-wire state for issued command requests
257 calling this waits for all on-wire state for issued command requests
258 to finish.
258 to finish.
259
259
260 When used as a context manager, this method is called when exiting the
260 When used as a context manager, this method is called when exiting the
261 context manager.
261 context manager.
262
262
263 This method may call ``sendcommands()`` if there are buffered commands.
263 This method may call ``sendcommands()`` if there are buffered commands.
264 """
264 """
265
265
266 class ipeerrequests(interfaceutil.Interface):
266 class ipeerrequests(interfaceutil.Interface):
267 """Interface for executing commands on a peer."""
267 """Interface for executing commands on a peer."""
268
268
269 def commandexecutor():
269 def commandexecutor():
270 """A context manager that resolves to an ipeercommandexecutor.
270 """A context manager that resolves to an ipeercommandexecutor.
271
271
272 The object this resolves to can be used to issue command requests
272 The object this resolves to can be used to issue command requests
273 to the peer.
273 to the peer.
274
274
275 Callers should call its ``callcommand`` method to issue command
275 Callers should call its ``callcommand`` method to issue command
276 requests.
276 requests.
277
277
278 A new executor should be obtained for each distinct set of commands
278 A new executor should be obtained for each distinct set of commands
279 (possibly just a single command) that the consumer wants to execute
279 (possibly just a single command) that the consumer wants to execute
280 as part of a single operation or round trip. This is because some
280 as part of a single operation or round trip. This is because some
281 peers are half-duplex and/or don't support persistent connections.
281 peers are half-duplex and/or don't support persistent connections.
282 e.g. in the case of HTTP peers, commands sent to an executor represent
282 e.g. in the case of HTTP peers, commands sent to an executor represent
283 a single HTTP request. While some peers may support multiple command
283 a single HTTP request. While some peers may support multiple command
284 sends over the wire per executor, consumers need to code to the least
284 sends over the wire per executor, consumers need to code to the least
285 capable peer. So it should be assumed that command executors buffer
285 capable peer. So it should be assumed that command executors buffer
286 called commands until they are told to send them and that each
286 called commands until they are told to send them and that each
287 command executor could result in a new connection or wire-level request
287 command executor could result in a new connection or wire-level request
288 being issued.
288 being issued.
289 """
289 """
290
290
291 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
291 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
292 """Unified interface for peer repositories.
292 """Unified interface for peer repositories.
293
293
294 All peer instances must conform to this interface.
294 All peer instances must conform to this interface.
295 """
295 """
296
296
297 @interfaceutil.implementer(ipeerbase)
297 @interfaceutil.implementer(ipeerbase)
298 class peer(object):
298 class peer(object):
299 """Base class for peer repositories."""
299 """Base class for peer repositories."""
300
300
301 def capable(self, name):
301 def capable(self, name):
302 caps = self.capabilities()
302 caps = self.capabilities()
303 if name in caps:
303 if name in caps:
304 return True
304 return True
305
305
306 name = '%s=' % name
306 name = '%s=' % name
307 for cap in caps:
307 for cap in caps:
308 if cap.startswith(name):
308 if cap.startswith(name):
309 return cap[len(name):]
309 return cap[len(name):]
310
310
311 return False
311 return False
312
312
313 def requirecap(self, name, purpose):
313 def requirecap(self, name, purpose):
314 if self.capable(name):
314 if self.capable(name):
315 return
315 return
316
316
317 raise error.CapabilityError(
317 raise error.CapabilityError(
318 _('cannot %s; remote repository does not support the %r '
318 _('cannot %s; remote repository does not support the %r '
319 'capability') % (purpose, name))
319 'capability') % (purpose, name))
320
320
321 class irevisiondelta(interfaceutil.Interface):
321 class irevisiondelta(interfaceutil.Interface):
322 """Represents a delta between one revision and another.
322 """Represents a delta between one revision and another.
323
323
324 Instances convey enough information to allow a revision to be exchanged
324 Instances convey enough information to allow a revision to be exchanged
325 with another repository.
325 with another repository.
326
326
327 Instances represent the fulltext revision data or a delta against
327 Instances represent the fulltext revision data or a delta against
328 another revision. Therefore the ``revision`` and ``delta`` attributes
328 another revision. Therefore the ``revision`` and ``delta`` attributes
329 are mutually exclusive.
329 are mutually exclusive.
330
330
331 Typically used for changegroup generation.
331 Typically used for changegroup generation.
332 """
332 """
333
333
334 node = interfaceutil.Attribute(
334 node = interfaceutil.Attribute(
335 """20 byte node of this revision.""")
335 """20 byte node of this revision.""")
336
336
337 p1node = interfaceutil.Attribute(
337 p1node = interfaceutil.Attribute(
338 """20 byte node of 1st parent of this revision.""")
338 """20 byte node of 1st parent of this revision.""")
339
339
340 p2node = interfaceutil.Attribute(
340 p2node = interfaceutil.Attribute(
341 """20 byte node of 2nd parent of this revision.""")
341 """20 byte node of 2nd parent of this revision.""")
342
342
343 linknode = interfaceutil.Attribute(
343 linknode = interfaceutil.Attribute(
344 """20 byte node of the changelog revision this node is linked to.""")
344 """20 byte node of the changelog revision this node is linked to.""")
345
345
346 flags = interfaceutil.Attribute(
346 flags = interfaceutil.Attribute(
347 """2 bytes of integer flags that apply to this revision.""")
347 """2 bytes of integer flags that apply to this revision.""")
348
348
349 basenode = interfaceutil.Attribute(
349 basenode = interfaceutil.Attribute(
350 """20 byte node of the revision this data is a delta against.
350 """20 byte node of the revision this data is a delta against.
351
351
352 ``nullid`` indicates that the revision is a full revision and not
352 ``nullid`` indicates that the revision is a full revision and not
353 a delta.
353 a delta.
354 """)
354 """)
355
355
356 baserevisionsize = interfaceutil.Attribute(
356 baserevisionsize = interfaceutil.Attribute(
357 """Size of base revision this delta is against.
357 """Size of base revision this delta is against.
358
358
359 May be ``None`` if ``basenode`` is ``nullid``.
359 May be ``None`` if ``basenode`` is ``nullid``.
360 """)
360 """)
361
361
362 revision = interfaceutil.Attribute(
362 revision = interfaceutil.Attribute(
363 """Raw fulltext of revision data for this node.""")
363 """Raw fulltext of revision data for this node.""")
364
364
365 delta = interfaceutil.Attribute(
365 delta = interfaceutil.Attribute(
366 """Delta between ``basenode`` and ``node``.
366 """Delta between ``basenode`` and ``node``.
367
367
368 Stored in the bdiff delta format.
368 Stored in the bdiff delta format.
369 """)
369 """)
370
370
371 class irevisiondeltarequest(interfaceutil.Interface):
371 class irevisiondeltarequest(interfaceutil.Interface):
372 """Represents a request to generate an ``irevisiondelta``."""
372 """Represents a request to generate an ``irevisiondelta``."""
373
373
374 node = interfaceutil.Attribute(
374 node = interfaceutil.Attribute(
375 """20 byte node of revision being requested.""")
375 """20 byte node of revision being requested.""")
376
376
377 p1node = interfaceutil.Attribute(
377 p1node = interfaceutil.Attribute(
378 """20 byte node of 1st parent of revision.""")
378 """20 byte node of 1st parent of revision.""")
379
379
380 p2node = interfaceutil.Attribute(
380 p2node = interfaceutil.Attribute(
381 """20 byte node of 2nd parent of revision.""")
381 """20 byte node of 2nd parent of revision.""")
382
382
383 linknode = interfaceutil.Attribute(
383 linknode = interfaceutil.Attribute(
384 """20 byte node to store in ``linknode`` attribute.""")
384 """20 byte node to store in ``linknode`` attribute.""")
385
385
386 basenode = interfaceutil.Attribute(
386 basenode = interfaceutil.Attribute(
387 """Base revision that delta should be generated against.
387 """Base revision that delta should be generated against.
388
388
389 If ``nullid``, the derived ``irevisiondelta`` should have its
389 If ``nullid``, the derived ``irevisiondelta`` should have its
390 ``revision`` field populated and no delta should be generated.
390 ``revision`` field populated and no delta should be generated.
391
391
392 If ``None``, the delta may be generated against any revision that
392 If ``None``, the delta may be generated against any revision that
393 is an ancestor of this revision. Or a full revision may be used.
393 is an ancestor of this revision. Or a full revision may be used.
394
394
395 If any other value, the delta should be produced against that
395 If any other value, the delta should be produced against that
396 revision.
396 revision.
397 """)
397 """)
398
398
399 ellipsis = interfaceutil.Attribute(
399 ellipsis = interfaceutil.Attribute(
400 """Boolean on whether the ellipsis flag should be set.""")
400 """Boolean on whether the ellipsis flag should be set.""")
401
401
402 class ifilerevisionssequence(interfaceutil.Interface):
402 class ifilerevisionssequence(interfaceutil.Interface):
403 """Contains index data for all revisions of a file.
403 """Contains index data for all revisions of a file.
404
404
405 Types implementing this behave like lists of tuples. The index
405 Types implementing this behave like lists of tuples. The index
406 in the list corresponds to the revision number. The values contain
406 in the list corresponds to the revision number. The values contain
407 index metadata.
407 index metadata.
408
408
409 The *null* revision (revision number -1) is always the last item
409 The *null* revision (revision number -1) is always the last item
410 in the index.
410 in the index.
411 """
411 """
412
412
413 def __len__():
413 def __len__():
414 """The total number of revisions."""
414 """The total number of revisions."""
415
415
416 def __getitem__(rev):
416 def __getitem__(rev):
417 """Returns the object having a specific revision number.
417 """Returns the object having a specific revision number.
418
418
419 Returns an 8-tuple with the following fields:
419 Returns an 8-tuple with the following fields:
420
420
421 offset+flags
421 offset+flags
422 Contains the offset and flags for the revision. 64-bit unsigned
422 Contains the offset and flags for the revision. 64-bit unsigned
423 integer where first 6 bytes are the offset and the next 2 bytes
423 integer where first 6 bytes are the offset and the next 2 bytes
424 are flags. The offset can be 0 if it is not used by the store.
424 are flags. The offset can be 0 if it is not used by the store.
425 compressed size
425 compressed size
426 Size of the revision data in the store. It can be 0 if it isn't
426 Size of the revision data in the store. It can be 0 if it isn't
427 needed by the store.
427 needed by the store.
428 uncompressed size
428 uncompressed size
429 Fulltext size. It can be 0 if it isn't needed by the store.
429 Fulltext size. It can be 0 if it isn't needed by the store.
430 base revision
430 base revision
431 Revision number of revision the delta for storage is encoded
431 Revision number of revision the delta for storage is encoded
432 against. -1 indicates not encoded against a base revision.
432 against. -1 indicates not encoded against a base revision.
433 link revision
433 link revision
434 Revision number of changelog revision this entry is related to.
434 Revision number of changelog revision this entry is related to.
435 p1 revision
435 p1 revision
436 Revision number of 1st parent. -1 if no 1st parent.
436 Revision number of 1st parent. -1 if no 1st parent.
437 p2 revision
437 p2 revision
438 Revision number of 2nd parent. -1 if no 1st parent.
438 Revision number of 2nd parent. -1 if no 1st parent.
439 node
439 node
440 Binary node value for this revision number.
440 Binary node value for this revision number.
441
441
442 Negative values should index off the end of the sequence. ``-1``
442 Negative values should index off the end of the sequence. ``-1``
443 should return the null revision. ``-2`` should return the most
443 should return the null revision. ``-2`` should return the most
444 recent revision.
444 recent revision.
445 """
445 """
446
446
447 def __contains__(rev):
447 def __contains__(rev):
448 """Whether a revision number exists."""
448 """Whether a revision number exists."""
449
449
450 def insert(self, i, entry):
450 def insert(self, i, entry):
451 """Add an item to the index at specific revision."""
451 """Add an item to the index at specific revision."""
452
452
453 class ifileindex(interfaceutil.Interface):
453 class ifileindex(interfaceutil.Interface):
454 """Storage interface for index data of a single file.
454 """Storage interface for index data of a single file.
455
455
456 File storage data is divided into index metadata and data storage.
456 File storage data is divided into index metadata and data storage.
457 This interface defines the index portion of the interface.
457 This interface defines the index portion of the interface.
458
458
459 The index logically consists of:
459 The index logically consists of:
460
460
461 * A mapping between revision numbers and nodes.
461 * A mapping between revision numbers and nodes.
462 * DAG data (storing and querying the relationship between nodes).
462 * DAG data (storing and querying the relationship between nodes).
463 * Metadata to facilitate storage.
463 * Metadata to facilitate storage.
464 """
464 """
465 index = interfaceutil.Attribute(
465 index = interfaceutil.Attribute(
466 """An ``ifilerevisionssequence`` instance.""")
466 """An ``ifilerevisionssequence`` instance.""")
467
467
468 def __len__():
468 def __len__():
469 """Obtain the number of revisions stored for this file."""
469 """Obtain the number of revisions stored for this file."""
470
470
471 def __iter__():
471 def __iter__():
472 """Iterate over revision numbers for this file."""
472 """Iterate over revision numbers for this file."""
473
473
474 def revs(start=0, stop=None):
474 def revs(start=0, stop=None):
475 """Iterate over revision numbers for this file, with control."""
475 """Iterate over revision numbers for this file, with control."""
476
476
477 def parents(node):
477 def parents(node):
478 """Returns a 2-tuple of parent nodes for a revision.
478 """Returns a 2-tuple of parent nodes for a revision.
479
479
480 Values will be ``nullid`` if the parent is empty.
480 Values will be ``nullid`` if the parent is empty.
481 """
481 """
482
482
483 def parentrevs(rev):
483 def parentrevs(rev):
484 """Like parents() but operates on revision numbers."""
484 """Like parents() but operates on revision numbers."""
485
485
486 def rev(node):
486 def rev(node):
487 """Obtain the revision number given a node.
487 """Obtain the revision number given a node.
488
488
489 Raises ``error.LookupError`` if the node is not known.
489 Raises ``error.LookupError`` if the node is not known.
490 """
490 """
491
491
492 def node(rev):
492 def node(rev):
493 """Obtain the node value given a revision number.
493 """Obtain the node value given a revision number.
494
494
495 Raises ``IndexError`` if the node is not known.
495 Raises ``IndexError`` if the node is not known.
496 """
496 """
497
497
498 def lookup(node):
498 def lookup(node):
499 """Attempt to resolve a value to a node.
499 """Attempt to resolve a value to a node.
500
500
501 Value can be a binary node, hex node, revision number, or a string
501 Value can be a binary node, hex node, revision number, or a string
502 that can be converted to an integer.
502 that can be converted to an integer.
503
503
504 Raises ``error.LookupError`` if a node could not be resolved.
504 Raises ``error.LookupError`` if a node could not be resolved.
505 """
505 """
506
506
507 def linkrev(rev):
507 def linkrev(rev):
508 """Obtain the changeset revision number a revision is linked to."""
508 """Obtain the changeset revision number a revision is linked to."""
509
509
510 def flags(rev):
510 def flags(rev):
511 """Obtain flags used to affect storage of a revision."""
511 """Obtain flags used to affect storage of a revision."""
512
512
513 def iscensored(rev):
513 def iscensored(rev):
514 """Return whether a revision's content has been censored."""
514 """Return whether a revision's content has been censored."""
515
515
516 def commonancestorsheads(node1, node2):
516 def commonancestorsheads(node1, node2):
517 """Obtain an iterable of nodes containing heads of common ancestors.
517 """Obtain an iterable of nodes containing heads of common ancestors.
518
518
519 See ``ancestor.commonancestorsheads()``.
519 See ``ancestor.commonancestorsheads()``.
520 """
520 """
521
521
522 def descendants(revs):
522 def descendants(revs):
523 """Obtain descendant revision numbers for a set of revision numbers.
523 """Obtain descendant revision numbers for a set of revision numbers.
524
524
525 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
525 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
526 """
526 """
527
527
528 def headrevs():
529 """Obtain a list of revision numbers that are DAG heads.
530
531 The list is sorted oldest to newest.
532
533 TODO determine if sorting is required.
534 """
535
536 def heads(start=None, stop=None):
528 def heads(start=None, stop=None):
537 """Obtain a list of nodes that are DAG heads, with control.
529 """Obtain a list of nodes that are DAG heads, with control.
538
530
539 The set of revisions examined can be limited by specifying
531 The set of revisions examined can be limited by specifying
540 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
532 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
541 iterable of nodes. DAG traversal starts at earlier revision
533 iterable of nodes. DAG traversal starts at earlier revision
542 ``start`` and iterates forward until any node in ``stop`` is
534 ``start`` and iterates forward until any node in ``stop`` is
543 encountered.
535 encountered.
544 """
536 """
545
537
546 def children(node):
538 def children(node):
547 """Obtain nodes that are children of a node.
539 """Obtain nodes that are children of a node.
548
540
549 Returns a list of nodes.
541 Returns a list of nodes.
550 """
542 """
551
543
552 def deltaparent(rev):
544 def deltaparent(rev):
553 """"Return the revision that is a suitable parent to delta against."""
545 """"Return the revision that is a suitable parent to delta against."""
554
546
555 class ifiledata(interfaceutil.Interface):
547 class ifiledata(interfaceutil.Interface):
556 """Storage interface for data storage of a specific file.
548 """Storage interface for data storage of a specific file.
557
549
558 This complements ``ifileindex`` and provides an interface for accessing
550 This complements ``ifileindex`` and provides an interface for accessing
559 data for a tracked file.
551 data for a tracked file.
560 """
552 """
561 def rawsize(rev):
553 def rawsize(rev):
562 """The size of the fulltext data for a revision as stored."""
554 """The size of the fulltext data for a revision as stored."""
563
555
564 def size(rev):
556 def size(rev):
565 """Obtain the fulltext size of file data.
557 """Obtain the fulltext size of file data.
566
558
567 Any metadata is excluded from size measurements. Use ``rawsize()`` if
559 Any metadata is excluded from size measurements. Use ``rawsize()`` if
568 metadata size is important.
560 metadata size is important.
569 """
561 """
570
562
571 def checkhash(fulltext, node, p1=None, p2=None, rev=None):
563 def checkhash(fulltext, node, p1=None, p2=None, rev=None):
572 """Validate the stored hash of a given fulltext and node.
564 """Validate the stored hash of a given fulltext and node.
573
565
574 Raises ``error.StorageError`` is hash validation fails.
566 Raises ``error.StorageError`` is hash validation fails.
575 """
567 """
576
568
577 def revision(node, raw=False):
569 def revision(node, raw=False):
578 """"Obtain fulltext data for a node.
570 """"Obtain fulltext data for a node.
579
571
580 By default, any storage transformations are applied before the data
572 By default, any storage transformations are applied before the data
581 is returned. If ``raw`` is True, non-raw storage transformations
573 is returned. If ``raw`` is True, non-raw storage transformations
582 are not applied.
574 are not applied.
583
575
584 The fulltext data may contain a header containing metadata. Most
576 The fulltext data may contain a header containing metadata. Most
585 consumers should use ``read()`` to obtain the actual file data.
577 consumers should use ``read()`` to obtain the actual file data.
586 """
578 """
587
579
588 def read(node):
580 def read(node):
589 """Resolve file fulltext data.
581 """Resolve file fulltext data.
590
582
591 This is similar to ``revision()`` except any metadata in the data
583 This is similar to ``revision()`` except any metadata in the data
592 headers is stripped.
584 headers is stripped.
593 """
585 """
594
586
595 def renamed(node):
587 def renamed(node):
596 """Obtain copy metadata for a node.
588 """Obtain copy metadata for a node.
597
589
598 Returns ``False`` if no copy metadata is stored or a 2-tuple of
590 Returns ``False`` if no copy metadata is stored or a 2-tuple of
599 (path, node) from which this revision was copied.
591 (path, node) from which this revision was copied.
600 """
592 """
601
593
602 def cmp(node, fulltext):
594 def cmp(node, fulltext):
603 """Compare fulltext to another revision.
595 """Compare fulltext to another revision.
604
596
605 Returns True if the fulltext is different from what is stored.
597 Returns True if the fulltext is different from what is stored.
606
598
607 This takes copy metadata into account.
599 This takes copy metadata into account.
608
600
609 TODO better document the copy metadata and censoring logic.
601 TODO better document the copy metadata and censoring logic.
610 """
602 """
611
603
612 def revdiff(rev1, rev2):
604 def revdiff(rev1, rev2):
613 """Obtain a delta between two revision numbers.
605 """Obtain a delta between two revision numbers.
614
606
615 Operates on raw data in the store (``revision(node, raw=True)``).
607 Operates on raw data in the store (``revision(node, raw=True)``).
616
608
617 The returned data is the result of ``bdiff.bdiff`` on the raw
609 The returned data is the result of ``bdiff.bdiff`` on the raw
618 revision data.
610 revision data.
619 """
611 """
620
612
621 def emitrevisiondeltas(requests):
613 def emitrevisiondeltas(requests):
622 """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
614 """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
623
615
624 Given an iterable of objects conforming to the ``irevisiondeltarequest``
616 Given an iterable of objects conforming to the ``irevisiondeltarequest``
625 interface, emits objects conforming to the ``irevisiondelta``
617 interface, emits objects conforming to the ``irevisiondelta``
626 interface.
618 interface.
627
619
628 This method is a generator.
620 This method is a generator.
629
621
630 ``irevisiondelta`` should be emitted in the same order of
622 ``irevisiondelta`` should be emitted in the same order of
631 ``irevisiondeltarequest`` that was passed in.
623 ``irevisiondeltarequest`` that was passed in.
632
624
633 The emitted objects MUST conform by the results of
625 The emitted objects MUST conform by the results of
634 ``irevisiondeltarequest``. Namely, they must respect any requests
626 ``irevisiondeltarequest``. Namely, they must respect any requests
635 for building a delta from a specific ``basenode`` if defined.
627 for building a delta from a specific ``basenode`` if defined.
636
628
637 When sending deltas, implementations must take into account whether
629 When sending deltas, implementations must take into account whether
638 the client has the base delta before encoding a delta against that
630 the client has the base delta before encoding a delta against that
639 revision. A revision encountered previously in ``requests`` is
631 revision. A revision encountered previously in ``requests`` is
640 always a suitable base revision. An example of a bad delta is a delta
632 always a suitable base revision. An example of a bad delta is a delta
641 against a non-ancestor revision. Another example of a bad delta is a
633 against a non-ancestor revision. Another example of a bad delta is a
642 delta against a censored revision.
634 delta against a censored revision.
643 """
635 """
644
636
645 class ifilemutation(interfaceutil.Interface):
637 class ifilemutation(interfaceutil.Interface):
646 """Storage interface for mutation events of a tracked file."""
638 """Storage interface for mutation events of a tracked file."""
647
639
648 def add(filedata, meta, transaction, linkrev, p1, p2):
640 def add(filedata, meta, transaction, linkrev, p1, p2):
649 """Add a new revision to the store.
641 """Add a new revision to the store.
650
642
651 Takes file data, dictionary of metadata, a transaction, linkrev,
643 Takes file data, dictionary of metadata, a transaction, linkrev,
652 and parent nodes.
644 and parent nodes.
653
645
654 Returns the node that was added.
646 Returns the node that was added.
655
647
656 May no-op if a revision matching the supplied data is already stored.
648 May no-op if a revision matching the supplied data is already stored.
657 """
649 """
658
650
659 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
651 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
660 flags=0, cachedelta=None):
652 flags=0, cachedelta=None):
661 """Add a new revision to the store.
653 """Add a new revision to the store.
662
654
663 This is similar to ``add()`` except it operates at a lower level.
655 This is similar to ``add()`` except it operates at a lower level.
664
656
665 The data passed in already contains a metadata header, if any.
657 The data passed in already contains a metadata header, if any.
666
658
667 ``node`` and ``flags`` can be used to define the expected node and
659 ``node`` and ``flags`` can be used to define the expected node and
668 the flags to use with storage.
660 the flags to use with storage.
669
661
670 ``add()`` is usually called when adding files from e.g. the working
662 ``add()`` is usually called when adding files from e.g. the working
671 directory. ``addrevision()`` is often called by ``add()`` and for
663 directory. ``addrevision()`` is often called by ``add()`` and for
672 scenarios where revision data has already been computed, such as when
664 scenarios where revision data has already been computed, such as when
673 applying raw data from a peer repo.
665 applying raw data from a peer repo.
674 """
666 """
675
667
676 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
668 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
677 """Process a series of deltas for storage.
669 """Process a series of deltas for storage.
678
670
679 ``deltas`` is an iterable of 7-tuples of
671 ``deltas`` is an iterable of 7-tuples of
680 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
672 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
681 to add.
673 to add.
682
674
683 The ``delta`` field contains ``mpatch`` data to apply to a base
675 The ``delta`` field contains ``mpatch`` data to apply to a base
684 revision, identified by ``deltabase``. The base node can be
676 revision, identified by ``deltabase``. The base node can be
685 ``nullid``, in which case the header from the delta can be ignored
677 ``nullid``, in which case the header from the delta can be ignored
686 and the delta used as the fulltext.
678 and the delta used as the fulltext.
687
679
688 ``addrevisioncb`` should be called for each node as it is committed.
680 ``addrevisioncb`` should be called for each node as it is committed.
689
681
690 Returns a list of nodes that were processed. A node will be in the list
682 Returns a list of nodes that were processed. A node will be in the list
691 even if it existed in the store previously.
683 even if it existed in the store previously.
692 """
684 """
693
685
694 def censorrevision(tr, node, tombstone=b''):
686 def censorrevision(tr, node, tombstone=b''):
695 """Remove the content of a single revision.
687 """Remove the content of a single revision.
696
688
697 The specified ``node`` will have its content purged from storage.
689 The specified ``node`` will have its content purged from storage.
698 Future attempts to access the revision data for this node will
690 Future attempts to access the revision data for this node will
699 result in failure.
691 result in failure.
700
692
701 A ``tombstone`` message can optionally be stored. This message may be
693 A ``tombstone`` message can optionally be stored. This message may be
702 displayed to users when they attempt to access the missing revision
694 displayed to users when they attempt to access the missing revision
703 data.
695 data.
704
696
705 Storage backends may have stored deltas against the previous content
697 Storage backends may have stored deltas against the previous content
706 in this revision. As part of censoring a revision, these storage
698 in this revision. As part of censoring a revision, these storage
707 backends are expected to rewrite any internally stored deltas such
699 backends are expected to rewrite any internally stored deltas such
708 that they no longer reference the deleted content.
700 that they no longer reference the deleted content.
709 """
701 """
710
702
711 def getstrippoint(minlink):
703 def getstrippoint(minlink):
712 """Find the minimum revision that must be stripped to strip a linkrev.
704 """Find the minimum revision that must be stripped to strip a linkrev.
713
705
714 Returns a 2-tuple containing the minimum revision number and a set
706 Returns a 2-tuple containing the minimum revision number and a set
715 of all revisions numbers that would be broken by this strip.
707 of all revisions numbers that would be broken by this strip.
716
708
717 TODO this is highly revlog centric and should be abstracted into
709 TODO this is highly revlog centric and should be abstracted into
718 a higher-level deletion API. ``repair.strip()`` relies on this.
710 a higher-level deletion API. ``repair.strip()`` relies on this.
719 """
711 """
720
712
721 def strip(minlink, transaction):
713 def strip(minlink, transaction):
722 """Remove storage of items starting at a linkrev.
714 """Remove storage of items starting at a linkrev.
723
715
724 This uses ``getstrippoint()`` to determine the first node to remove.
716 This uses ``getstrippoint()`` to determine the first node to remove.
725 Then it effectively truncates storage for all revisions after that.
717 Then it effectively truncates storage for all revisions after that.
726
718
727 TODO this is highly revlog centric and should be abstracted into a
719 TODO this is highly revlog centric and should be abstracted into a
728 higher-level deletion API.
720 higher-level deletion API.
729 """
721 """
730
722
731 class ifilestorage(ifileindex, ifiledata, ifilemutation):
723 class ifilestorage(ifileindex, ifiledata, ifilemutation):
732 """Complete storage interface for a single tracked file."""
724 """Complete storage interface for a single tracked file."""
733
725
734 version = interfaceutil.Attribute(
726 version = interfaceutil.Attribute(
735 """Version number of storage.
727 """Version number of storage.
736
728
737 TODO this feels revlog centric and could likely be removed.
729 TODO this feels revlog centric and could likely be removed.
738 """)
730 """)
739
731
740 _generaldelta = interfaceutil.Attribute(
732 _generaldelta = interfaceutil.Attribute(
741 """Whether deltas can be against any parent revision.
733 """Whether deltas can be against any parent revision.
742
734
743 TODO this is used by changegroup code and it could probably be
735 TODO this is used by changegroup code and it could probably be
744 folded into another API.
736 folded into another API.
745 """)
737 """)
746
738
747 def files():
739 def files():
748 """Obtain paths that are backing storage for this file.
740 """Obtain paths that are backing storage for this file.
749
741
750 TODO this is used heavily by verify code and there should probably
742 TODO this is used heavily by verify code and there should probably
751 be a better API for that.
743 be a better API for that.
752 """
744 """
753
745
754 def checksize():
746 def checksize():
755 """Obtain the expected sizes of backing files.
747 """Obtain the expected sizes of backing files.
756
748
757 TODO this is used by verify and it should not be part of the interface.
749 TODO this is used by verify and it should not be part of the interface.
758 """
750 """
759
751
760 class idirs(interfaceutil.Interface):
752 class idirs(interfaceutil.Interface):
761 """Interface representing a collection of directories from paths.
753 """Interface representing a collection of directories from paths.
762
754
763 This interface is essentially a derived data structure representing
755 This interface is essentially a derived data structure representing
764 directories from a collection of paths.
756 directories from a collection of paths.
765 """
757 """
766
758
767 def addpath(path):
759 def addpath(path):
768 """Add a path to the collection.
760 """Add a path to the collection.
769
761
770 All directories in the path will be added to the collection.
762 All directories in the path will be added to the collection.
771 """
763 """
772
764
773 def delpath(path):
765 def delpath(path):
774 """Remove a path from the collection.
766 """Remove a path from the collection.
775
767
776 If the removal was the last path in a particular directory, the
768 If the removal was the last path in a particular directory, the
777 directory is removed from the collection.
769 directory is removed from the collection.
778 """
770 """
779
771
780 def __iter__():
772 def __iter__():
781 """Iterate over the directories in this collection of paths."""
773 """Iterate over the directories in this collection of paths."""
782
774
783 def __contains__(path):
775 def __contains__(path):
784 """Whether a specific directory is in this collection."""
776 """Whether a specific directory is in this collection."""
785
777
786 class imanifestdict(interfaceutil.Interface):
778 class imanifestdict(interfaceutil.Interface):
787 """Interface representing a manifest data structure.
779 """Interface representing a manifest data structure.
788
780
789 A manifest is effectively a dict mapping paths to entries. Each entry
781 A manifest is effectively a dict mapping paths to entries. Each entry
790 consists of a binary node and extra flags affecting that entry.
782 consists of a binary node and extra flags affecting that entry.
791 """
783 """
792
784
793 def __getitem__(path):
785 def __getitem__(path):
794 """Returns the binary node value for a path in the manifest.
786 """Returns the binary node value for a path in the manifest.
795
787
796 Raises ``KeyError`` if the path does not exist in the manifest.
788 Raises ``KeyError`` if the path does not exist in the manifest.
797
789
798 Equivalent to ``self.find(path)[0]``.
790 Equivalent to ``self.find(path)[0]``.
799 """
791 """
800
792
801 def find(path):
793 def find(path):
802 """Returns the entry for a path in the manifest.
794 """Returns the entry for a path in the manifest.
803
795
804 Returns a 2-tuple of (node, flags).
796 Returns a 2-tuple of (node, flags).
805
797
806 Raises ``KeyError`` if the path does not exist in the manifest.
798 Raises ``KeyError`` if the path does not exist in the manifest.
807 """
799 """
808
800
809 def __len__():
801 def __len__():
810 """Return the number of entries in the manifest."""
802 """Return the number of entries in the manifest."""
811
803
812 def __nonzero__():
804 def __nonzero__():
813 """Returns True if the manifest has entries, False otherwise."""
805 """Returns True if the manifest has entries, False otherwise."""
814
806
815 __bool__ = __nonzero__
807 __bool__ = __nonzero__
816
808
817 def __setitem__(path, node):
809 def __setitem__(path, node):
818 """Define the node value for a path in the manifest.
810 """Define the node value for a path in the manifest.
819
811
820 If the path is already in the manifest, its flags will be copied to
812 If the path is already in the manifest, its flags will be copied to
821 the new entry.
813 the new entry.
822 """
814 """
823
815
824 def __contains__(path):
816 def __contains__(path):
825 """Whether a path exists in the manifest."""
817 """Whether a path exists in the manifest."""
826
818
827 def __delitem__(path):
819 def __delitem__(path):
828 """Remove a path from the manifest.
820 """Remove a path from the manifest.
829
821
830 Raises ``KeyError`` if the path is not in the manifest.
822 Raises ``KeyError`` if the path is not in the manifest.
831 """
823 """
832
824
833 def __iter__():
825 def __iter__():
834 """Iterate over paths in the manifest."""
826 """Iterate over paths in the manifest."""
835
827
836 def iterkeys():
828 def iterkeys():
837 """Iterate over paths in the manifest."""
829 """Iterate over paths in the manifest."""
838
830
839 def keys():
831 def keys():
840 """Obtain a list of paths in the manifest."""
832 """Obtain a list of paths in the manifest."""
841
833
842 def filesnotin(other, match=None):
834 def filesnotin(other, match=None):
843 """Obtain the set of paths in this manifest but not in another.
835 """Obtain the set of paths in this manifest but not in another.
844
836
845 ``match`` is an optional matcher function to be applied to both
837 ``match`` is an optional matcher function to be applied to both
846 manifests.
838 manifests.
847
839
848 Returns a set of paths.
840 Returns a set of paths.
849 """
841 """
850
842
851 def dirs():
843 def dirs():
852 """Returns an object implementing the ``idirs`` interface."""
844 """Returns an object implementing the ``idirs`` interface."""
853
845
854 def hasdir(dir):
846 def hasdir(dir):
855 """Returns a bool indicating if a directory is in this manifest."""
847 """Returns a bool indicating if a directory is in this manifest."""
856
848
857 def matches(match):
849 def matches(match):
858 """Generate a new manifest filtered through a matcher.
850 """Generate a new manifest filtered through a matcher.
859
851
860 Returns an object conforming to the ``imanifestdict`` interface.
852 Returns an object conforming to the ``imanifestdict`` interface.
861 """
853 """
862
854
863 def walk(match):
855 def walk(match):
864 """Generator of paths in manifest satisfying a matcher.
856 """Generator of paths in manifest satisfying a matcher.
865
857
866 This is equivalent to ``self.matches(match).iterkeys()`` except a new
858 This is equivalent to ``self.matches(match).iterkeys()`` except a new
867 manifest object is not created.
859 manifest object is not created.
868
860
869 If the matcher has explicit files listed and they don't exist in
861 If the matcher has explicit files listed and they don't exist in
870 the manifest, ``match.bad()`` is called for each missing file.
862 the manifest, ``match.bad()`` is called for each missing file.
871 """
863 """
872
864
873 def diff(other, match=None, clean=False):
865 def diff(other, match=None, clean=False):
874 """Find differences between this manifest and another.
866 """Find differences between this manifest and another.
875
867
876 This manifest is compared to ``other``.
868 This manifest is compared to ``other``.
877
869
878 If ``match`` is provided, the two manifests are filtered against this
870 If ``match`` is provided, the two manifests are filtered against this
879 matcher and only entries satisfying the matcher are compared.
871 matcher and only entries satisfying the matcher are compared.
880
872
881 If ``clean`` is True, unchanged files are included in the returned
873 If ``clean`` is True, unchanged files are included in the returned
882 object.
874 object.
883
875
884 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
876 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
885 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
877 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
886 represents the node and flags for this manifest and ``(node2, flag2)``
878 represents the node and flags for this manifest and ``(node2, flag2)``
887 are the same for the other manifest.
879 are the same for the other manifest.
888 """
880 """
889
881
890 def setflag(path, flag):
882 def setflag(path, flag):
891 """Set the flag value for a given path.
883 """Set the flag value for a given path.
892
884
893 Raises ``KeyError`` if the path is not already in the manifest.
885 Raises ``KeyError`` if the path is not already in the manifest.
894 """
886 """
895
887
896 def get(path, default=None):
888 def get(path, default=None):
897 """Obtain the node value for a path or a default value if missing."""
889 """Obtain the node value for a path or a default value if missing."""
898
890
899 def flags(path, default=''):
891 def flags(path, default=''):
900 """Return the flags value for a path or a default value if missing."""
892 """Return the flags value for a path or a default value if missing."""
901
893
902 def copy():
894 def copy():
903 """Return a copy of this manifest."""
895 """Return a copy of this manifest."""
904
896
905 def items():
897 def items():
906 """Returns an iterable of (path, node) for items in this manifest."""
898 """Returns an iterable of (path, node) for items in this manifest."""
907
899
908 def iteritems():
900 def iteritems():
909 """Identical to items()."""
901 """Identical to items()."""
910
902
911 def iterentries():
903 def iterentries():
912 """Returns an iterable of (path, node, flags) for this manifest.
904 """Returns an iterable of (path, node, flags) for this manifest.
913
905
914 Similar to ``iteritems()`` except items are a 3-tuple and include
906 Similar to ``iteritems()`` except items are a 3-tuple and include
915 flags.
907 flags.
916 """
908 """
917
909
918 def text():
910 def text():
919 """Obtain the raw data representation for this manifest.
911 """Obtain the raw data representation for this manifest.
920
912
921 Result is used to create a manifest revision.
913 Result is used to create a manifest revision.
922 """
914 """
923
915
924 def fastdelta(base, changes):
916 def fastdelta(base, changes):
925 """Obtain a delta between this manifest and another given changes.
917 """Obtain a delta between this manifest and another given changes.
926
918
927 ``base`` in the raw data representation for another manifest.
919 ``base`` in the raw data representation for another manifest.
928
920
929 ``changes`` is an iterable of ``(path, to_delete)``.
921 ``changes`` is an iterable of ``(path, to_delete)``.
930
922
931 Returns a 2-tuple containing ``bytearray(self.text())`` and the
923 Returns a 2-tuple containing ``bytearray(self.text())`` and the
932 delta between ``base`` and this manifest.
924 delta between ``base`` and this manifest.
933 """
925 """
934
926
935 class imanifestrevisionbase(interfaceutil.Interface):
927 class imanifestrevisionbase(interfaceutil.Interface):
936 """Base interface representing a single revision of a manifest.
928 """Base interface representing a single revision of a manifest.
937
929
938 Should not be used as a primary interface: should always be inherited
930 Should not be used as a primary interface: should always be inherited
939 as part of a larger interface.
931 as part of a larger interface.
940 """
932 """
941
933
942 def new():
934 def new():
943 """Obtain a new manifest instance.
935 """Obtain a new manifest instance.
944
936
945 Returns an object conforming to the ``imanifestrevisionwritable``
937 Returns an object conforming to the ``imanifestrevisionwritable``
946 interface. The instance will be associated with the same
938 interface. The instance will be associated with the same
947 ``imanifestlog`` collection as this instance.
939 ``imanifestlog`` collection as this instance.
948 """
940 """
949
941
950 def copy():
942 def copy():
951 """Obtain a copy of this manifest instance.
943 """Obtain a copy of this manifest instance.
952
944
953 Returns an object conforming to the ``imanifestrevisionwritable``
945 Returns an object conforming to the ``imanifestrevisionwritable``
954 interface. The instance will be associated with the same
946 interface. The instance will be associated with the same
955 ``imanifestlog`` collection as this instance.
947 ``imanifestlog`` collection as this instance.
956 """
948 """
957
949
958 def read():
950 def read():
959 """Obtain the parsed manifest data structure.
951 """Obtain the parsed manifest data structure.
960
952
961 The returned object conforms to the ``imanifestdict`` interface.
953 The returned object conforms to the ``imanifestdict`` interface.
962 """
954 """
963
955
964 class imanifestrevisionstored(imanifestrevisionbase):
956 class imanifestrevisionstored(imanifestrevisionbase):
965 """Interface representing a manifest revision committed to storage."""
957 """Interface representing a manifest revision committed to storage."""
966
958
967 def node():
959 def node():
968 """The binary node for this manifest."""
960 """The binary node for this manifest."""
969
961
970 parents = interfaceutil.Attribute(
962 parents = interfaceutil.Attribute(
971 """List of binary nodes that are parents for this manifest revision."""
963 """List of binary nodes that are parents for this manifest revision."""
972 )
964 )
973
965
974 def readdelta(shallow=False):
966 def readdelta(shallow=False):
975 """Obtain the manifest data structure representing changes from parent.
967 """Obtain the manifest data structure representing changes from parent.
976
968
977 This manifest is compared to its 1st parent. A new manifest representing
969 This manifest is compared to its 1st parent. A new manifest representing
978 those differences is constructed.
970 those differences is constructed.
979
971
980 The returned object conforms to the ``imanifestdict`` interface.
972 The returned object conforms to the ``imanifestdict`` interface.
981 """
973 """
982
974
983 def readfast(shallow=False):
975 def readfast(shallow=False):
984 """Calls either ``read()`` or ``readdelta()``.
976 """Calls either ``read()`` or ``readdelta()``.
985
977
986 The faster of the two options is called.
978 The faster of the two options is called.
987 """
979 """
988
980
989 def find(key):
981 def find(key):
990 """Calls self.read().find(key)``.
982 """Calls self.read().find(key)``.
991
983
992 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
984 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
993 """
985 """
994
986
995 class imanifestrevisionwritable(imanifestrevisionbase):
987 class imanifestrevisionwritable(imanifestrevisionbase):
996 """Interface representing a manifest revision that can be committed."""
988 """Interface representing a manifest revision that can be committed."""
997
989
998 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
990 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
999 """Add this revision to storage.
991 """Add this revision to storage.
1000
992
1001 Takes a transaction object, the changeset revision number it will
993 Takes a transaction object, the changeset revision number it will
1002 be associated with, its parent nodes, and lists of added and
994 be associated with, its parent nodes, and lists of added and
1003 removed paths.
995 removed paths.
1004
996
1005 If match is provided, storage can choose not to inspect or write out
997 If match is provided, storage can choose not to inspect or write out
1006 items that do not match. Storage is still required to be able to provide
998 items that do not match. Storage is still required to be able to provide
1007 the full manifest in the future for any directories written (these
999 the full manifest in the future for any directories written (these
1008 manifests should not be "narrowed on disk").
1000 manifests should not be "narrowed on disk").
1009
1001
1010 Returns the binary node of the created revision.
1002 Returns the binary node of the created revision.
1011 """
1003 """
1012
1004
1013 class imanifeststorage(interfaceutil.Interface):
1005 class imanifeststorage(interfaceutil.Interface):
1014 """Storage interface for manifest data."""
1006 """Storage interface for manifest data."""
1015
1007
1016 tree = interfaceutil.Attribute(
1008 tree = interfaceutil.Attribute(
1017 """The path to the directory this manifest tracks.
1009 """The path to the directory this manifest tracks.
1018
1010
1019 The empty bytestring represents the root manifest.
1011 The empty bytestring represents the root manifest.
1020 """)
1012 """)
1021
1013
1022 index = interfaceutil.Attribute(
1014 index = interfaceutil.Attribute(
1023 """An ``ifilerevisionssequence`` instance.""")
1015 """An ``ifilerevisionssequence`` instance.""")
1024
1016
1025 indexfile = interfaceutil.Attribute(
1017 indexfile = interfaceutil.Attribute(
1026 """Path of revlog index file.
1018 """Path of revlog index file.
1027
1019
1028 TODO this is revlog specific and should not be exposed.
1020 TODO this is revlog specific and should not be exposed.
1029 """)
1021 """)
1030
1022
1031 opener = interfaceutil.Attribute(
1023 opener = interfaceutil.Attribute(
1032 """VFS opener to use to access underlying files used for storage.
1024 """VFS opener to use to access underlying files used for storage.
1033
1025
1034 TODO this is revlog specific and should not be exposed.
1026 TODO this is revlog specific and should not be exposed.
1035 """)
1027 """)
1036
1028
1037 version = interfaceutil.Attribute(
1029 version = interfaceutil.Attribute(
1038 """Revlog version number.
1030 """Revlog version number.
1039
1031
1040 TODO this is revlog specific and should not be exposed.
1032 TODO this is revlog specific and should not be exposed.
1041 """)
1033 """)
1042
1034
1043 _generaldelta = interfaceutil.Attribute(
1035 _generaldelta = interfaceutil.Attribute(
1044 """Whether generaldelta storage is being used.
1036 """Whether generaldelta storage is being used.
1045
1037
1046 TODO this is revlog specific and should not be exposed.
1038 TODO this is revlog specific and should not be exposed.
1047 """)
1039 """)
1048
1040
1049 fulltextcache = interfaceutil.Attribute(
1041 fulltextcache = interfaceutil.Attribute(
1050 """Dict with cache of fulltexts.
1042 """Dict with cache of fulltexts.
1051
1043
1052 TODO this doesn't feel appropriate for the storage interface.
1044 TODO this doesn't feel appropriate for the storage interface.
1053 """)
1045 """)
1054
1046
1055 def __len__():
1047 def __len__():
1056 """Obtain the number of revisions stored for this manifest."""
1048 """Obtain the number of revisions stored for this manifest."""
1057
1049
1058 def __iter__():
1050 def __iter__():
1059 """Iterate over revision numbers for this manifest."""
1051 """Iterate over revision numbers for this manifest."""
1060
1052
1061 def rev(node):
1053 def rev(node):
1062 """Obtain the revision number given a binary node.
1054 """Obtain the revision number given a binary node.
1063
1055
1064 Raises ``error.LookupError`` if the node is not known.
1056 Raises ``error.LookupError`` if the node is not known.
1065 """
1057 """
1066
1058
1067 def node(rev):
1059 def node(rev):
1068 """Obtain the node value given a revision number.
1060 """Obtain the node value given a revision number.
1069
1061
1070 Raises ``error.LookupError`` if the revision is not known.
1062 Raises ``error.LookupError`` if the revision is not known.
1071 """
1063 """
1072
1064
1073 def lookup(value):
1065 def lookup(value):
1074 """Attempt to resolve a value to a node.
1066 """Attempt to resolve a value to a node.
1075
1067
1076 Value can be a binary node, hex node, revision number, or a bytes
1068 Value can be a binary node, hex node, revision number, or a bytes
1077 that can be converted to an integer.
1069 that can be converted to an integer.
1078
1070
1079 Raises ``error.LookupError`` if a ndoe could not be resolved.
1071 Raises ``error.LookupError`` if a ndoe could not be resolved.
1080
1072
1081 TODO this is only used by debug* commands and can probably be deleted
1073 TODO this is only used by debug* commands and can probably be deleted
1082 easily.
1074 easily.
1083 """
1075 """
1084
1076
1085 def parents(node):
1077 def parents(node):
1086 """Returns a 2-tuple of parent nodes for a node.
1078 """Returns a 2-tuple of parent nodes for a node.
1087
1079
1088 Values will be ``nullid`` if the parent is empty.
1080 Values will be ``nullid`` if the parent is empty.
1089 """
1081 """
1090
1082
1091 def parentrevs(rev):
1083 def parentrevs(rev):
1092 """Like parents() but operates on revision numbers."""
1084 """Like parents() but operates on revision numbers."""
1093
1085
1094 def linkrev(rev):
1086 def linkrev(rev):
1095 """Obtain the changeset revision number a revision is linked to."""
1087 """Obtain the changeset revision number a revision is linked to."""
1096
1088
1097 def revision(node, _df=None, raw=False):
1089 def revision(node, _df=None, raw=False):
1098 """Obtain fulltext data for a node."""
1090 """Obtain fulltext data for a node."""
1099
1091
1100 def revdiff(rev1, rev2):
1092 def revdiff(rev1, rev2):
1101 """Obtain a delta between two revision numbers.
1093 """Obtain a delta between two revision numbers.
1102
1094
1103 The returned data is the result of ``bdiff.bdiff()`` on the raw
1095 The returned data is the result of ``bdiff.bdiff()`` on the raw
1104 revision data.
1096 revision data.
1105 """
1097 """
1106
1098
1107 def cmp(node, fulltext):
1099 def cmp(node, fulltext):
1108 """Compare fulltext to another revision.
1100 """Compare fulltext to another revision.
1109
1101
1110 Returns True if the fulltext is different from what is stored.
1102 Returns True if the fulltext is different from what is stored.
1111 """
1103 """
1112
1104
1113 def emitrevisiondeltas(requests):
1105 def emitrevisiondeltas(requests):
1114 """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
1106 """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
1115
1107
1116 See the documentation for ``ifiledata`` for more.
1108 See the documentation for ``ifiledata`` for more.
1117 """
1109 """
1118
1110
1119 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1111 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1120 """Process a series of deltas for storage.
1112 """Process a series of deltas for storage.
1121
1113
1122 See the documentation in ``ifilemutation`` for more.
1114 See the documentation in ``ifilemutation`` for more.
1123 """
1115 """
1124
1116
1125 def getstrippoint(minlink):
1117 def getstrippoint(minlink):
1126 """Find minimum revision that must be stripped to strip a linkrev.
1118 """Find minimum revision that must be stripped to strip a linkrev.
1127
1119
1128 See the documentation in ``ifilemutation`` for more.
1120 See the documentation in ``ifilemutation`` for more.
1129 """
1121 """
1130
1122
1131 def strip(minlink, transaction):
1123 def strip(minlink, transaction):
1132 """Remove storage of items starting at a linkrev.
1124 """Remove storage of items starting at a linkrev.
1133
1125
1134 See the documentation in ``ifilemutation`` for more.
1126 See the documentation in ``ifilemutation`` for more.
1135 """
1127 """
1136
1128
1137 def checksize():
1129 def checksize():
1138 """Obtain the expected sizes of backing files.
1130 """Obtain the expected sizes of backing files.
1139
1131
1140 TODO this is used by verify and it should not be part of the interface.
1132 TODO this is used by verify and it should not be part of the interface.
1141 """
1133 """
1142
1134
1143 def files():
1135 def files():
1144 """Obtain paths that are backing storage for this manifest.
1136 """Obtain paths that are backing storage for this manifest.
1145
1137
1146 TODO this is used by verify and there should probably be a better API
1138 TODO this is used by verify and there should probably be a better API
1147 for this functionality.
1139 for this functionality.
1148 """
1140 """
1149
1141
1150 def deltaparent(rev):
1142 def deltaparent(rev):
1151 """Obtain the revision that a revision is delta'd against.
1143 """Obtain the revision that a revision is delta'd against.
1152
1144
1153 TODO delta encoding is an implementation detail of storage and should
1145 TODO delta encoding is an implementation detail of storage and should
1154 not be exposed to the storage interface.
1146 not be exposed to the storage interface.
1155 """
1147 """
1156
1148
1157 def clone(tr, dest, **kwargs):
1149 def clone(tr, dest, **kwargs):
1158 """Clone this instance to another."""
1150 """Clone this instance to another."""
1159
1151
1160 def clearcaches(clear_persisted_data=False):
1152 def clearcaches(clear_persisted_data=False):
1161 """Clear any caches associated with this instance."""
1153 """Clear any caches associated with this instance."""
1162
1154
1163 def dirlog(d):
1155 def dirlog(d):
1164 """Obtain a manifest storage instance for a tree."""
1156 """Obtain a manifest storage instance for a tree."""
1165
1157
1166 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1158 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1167 match=None):
1159 match=None):
1168 """Add a revision to storage.
1160 """Add a revision to storage.
1169
1161
1170 ``m`` is an object conforming to ``imanifestdict``.
1162 ``m`` is an object conforming to ``imanifestdict``.
1171
1163
1172 ``link`` is the linkrev revision number.
1164 ``link`` is the linkrev revision number.
1173
1165
1174 ``p1`` and ``p2`` are the parent revision numbers.
1166 ``p1`` and ``p2`` are the parent revision numbers.
1175
1167
1176 ``added`` and ``removed`` are iterables of added and removed paths,
1168 ``added`` and ``removed`` are iterables of added and removed paths,
1177 respectively.
1169 respectively.
1178
1170
1179 ``readtree`` is a function that can be used to read the child tree(s)
1171 ``readtree`` is a function that can be used to read the child tree(s)
1180 when recursively writing the full tree structure when using
1172 when recursively writing the full tree structure when using
1181 treemanifets.
1173 treemanifets.
1182
1174
1183 ``match`` is a matcher that can be used to hint to storage that not all
1175 ``match`` is a matcher that can be used to hint to storage that not all
1184 paths must be inspected; this is an optimization and can be safely
1176 paths must be inspected; this is an optimization and can be safely
1185 ignored. Note that the storage must still be able to reproduce a full
1177 ignored. Note that the storage must still be able to reproduce a full
1186 manifest including files that did not match.
1178 manifest including files that did not match.
1187 """
1179 """
1188
1180
1189 class imanifestlog(interfaceutil.Interface):
1181 class imanifestlog(interfaceutil.Interface):
1190 """Interface representing a collection of manifest snapshots.
1182 """Interface representing a collection of manifest snapshots.
1191
1183
1192 Represents the root manifest in a repository.
1184 Represents the root manifest in a repository.
1193
1185
1194 Also serves as a means to access nested tree manifests and to cache
1186 Also serves as a means to access nested tree manifests and to cache
1195 tree manifests.
1187 tree manifests.
1196 """
1188 """
1197
1189
1198 def __getitem__(node):
1190 def __getitem__(node):
1199 """Obtain a manifest instance for a given binary node.
1191 """Obtain a manifest instance for a given binary node.
1200
1192
1201 Equivalent to calling ``self.get('', node)``.
1193 Equivalent to calling ``self.get('', node)``.
1202
1194
1203 The returned object conforms to the ``imanifestrevisionstored``
1195 The returned object conforms to the ``imanifestrevisionstored``
1204 interface.
1196 interface.
1205 """
1197 """
1206
1198
1207 def get(tree, node, verify=True):
1199 def get(tree, node, verify=True):
1208 """Retrieve the manifest instance for a given directory and binary node.
1200 """Retrieve the manifest instance for a given directory and binary node.
1209
1201
1210 ``node`` always refers to the node of the root manifest (which will be
1202 ``node`` always refers to the node of the root manifest (which will be
1211 the only manifest if flat manifests are being used).
1203 the only manifest if flat manifests are being used).
1212
1204
1213 If ``tree`` is the empty string, the root manifest is returned.
1205 If ``tree`` is the empty string, the root manifest is returned.
1214 Otherwise the manifest for the specified directory will be returned
1206 Otherwise the manifest for the specified directory will be returned
1215 (requires tree manifests).
1207 (requires tree manifests).
1216
1208
1217 If ``verify`` is True, ``LookupError`` is raised if the node is not
1209 If ``verify`` is True, ``LookupError`` is raised if the node is not
1218 known.
1210 known.
1219
1211
1220 The returned object conforms to the ``imanifestrevisionstored``
1212 The returned object conforms to the ``imanifestrevisionstored``
1221 interface.
1213 interface.
1222 """
1214 """
1223
1215
1224 def getstorage(tree):
1216 def getstorage(tree):
1225 """Retrieve an interface to storage for a particular tree.
1217 """Retrieve an interface to storage for a particular tree.
1226
1218
1227 If ``tree`` is the empty bytestring, storage for the root manifest will
1219 If ``tree`` is the empty bytestring, storage for the root manifest will
1228 be returned. Otherwise storage for a tree manifest is returned.
1220 be returned. Otherwise storage for a tree manifest is returned.
1229
1221
1230 TODO formalize interface for returned object.
1222 TODO formalize interface for returned object.
1231 """
1223 """
1232
1224
1233 def clearcaches():
1225 def clearcaches():
1234 """Clear caches associated with this collection."""
1226 """Clear caches associated with this collection."""
1235
1227
1236 def rev(node):
1228 def rev(node):
1237 """Obtain the revision number for a binary node.
1229 """Obtain the revision number for a binary node.
1238
1230
1239 Raises ``error.LookupError`` if the node is not known.
1231 Raises ``error.LookupError`` if the node is not known.
1240 """
1232 """
1241
1233
1242 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1234 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1243 """Local repository sub-interface providing access to tracked file storage.
1235 """Local repository sub-interface providing access to tracked file storage.
1244
1236
1245 This interface defines how a repository accesses storage for a single
1237 This interface defines how a repository accesses storage for a single
1246 tracked file path.
1238 tracked file path.
1247 """
1239 """
1248
1240
1249 def file(f):
1241 def file(f):
1250 """Obtain a filelog for a tracked path.
1242 """Obtain a filelog for a tracked path.
1251
1243
1252 The returned type conforms to the ``ifilestorage`` interface.
1244 The returned type conforms to the ``ifilestorage`` interface.
1253 """
1245 """
1254
1246
1255 class ilocalrepositorymain(interfaceutil.Interface):
1247 class ilocalrepositorymain(interfaceutil.Interface):
1256 """Main interface for local repositories.
1248 """Main interface for local repositories.
1257
1249
1258 This currently captures the reality of things - not how things should be.
1250 This currently captures the reality of things - not how things should be.
1259 """
1251 """
1260
1252
1261 supportedformats = interfaceutil.Attribute(
1253 supportedformats = interfaceutil.Attribute(
1262 """Set of requirements that apply to stream clone.
1254 """Set of requirements that apply to stream clone.
1263
1255
1264 This is actually a class attribute and is shared among all instances.
1256 This is actually a class attribute and is shared among all instances.
1265 """)
1257 """)
1266
1258
1267 supported = interfaceutil.Attribute(
1259 supported = interfaceutil.Attribute(
1268 """Set of requirements that this repo is capable of opening.""")
1260 """Set of requirements that this repo is capable of opening.""")
1269
1261
1270 requirements = interfaceutil.Attribute(
1262 requirements = interfaceutil.Attribute(
1271 """Set of requirements this repo uses.""")
1263 """Set of requirements this repo uses.""")
1272
1264
1273 filtername = interfaceutil.Attribute(
1265 filtername = interfaceutil.Attribute(
1274 """Name of the repoview that is active on this repo.""")
1266 """Name of the repoview that is active on this repo.""")
1275
1267
1276 wvfs = interfaceutil.Attribute(
1268 wvfs = interfaceutil.Attribute(
1277 """VFS used to access the working directory.""")
1269 """VFS used to access the working directory.""")
1278
1270
1279 vfs = interfaceutil.Attribute(
1271 vfs = interfaceutil.Attribute(
1280 """VFS rooted at the .hg directory.
1272 """VFS rooted at the .hg directory.
1281
1273
1282 Used to access repository data not in the store.
1274 Used to access repository data not in the store.
1283 """)
1275 """)
1284
1276
1285 svfs = interfaceutil.Attribute(
1277 svfs = interfaceutil.Attribute(
1286 """VFS rooted at the store.
1278 """VFS rooted at the store.
1287
1279
1288 Used to access repository data in the store. Typically .hg/store.
1280 Used to access repository data in the store. Typically .hg/store.
1289 But can point elsewhere if the store is shared.
1281 But can point elsewhere if the store is shared.
1290 """)
1282 """)
1291
1283
1292 root = interfaceutil.Attribute(
1284 root = interfaceutil.Attribute(
1293 """Path to the root of the working directory.""")
1285 """Path to the root of the working directory.""")
1294
1286
1295 path = interfaceutil.Attribute(
1287 path = interfaceutil.Attribute(
1296 """Path to the .hg directory.""")
1288 """Path to the .hg directory.""")
1297
1289
1298 origroot = interfaceutil.Attribute(
1290 origroot = interfaceutil.Attribute(
1299 """The filesystem path that was used to construct the repo.""")
1291 """The filesystem path that was used to construct the repo.""")
1300
1292
1301 auditor = interfaceutil.Attribute(
1293 auditor = interfaceutil.Attribute(
1302 """A pathauditor for the working directory.
1294 """A pathauditor for the working directory.
1303
1295
1304 This checks if a path refers to a nested repository.
1296 This checks if a path refers to a nested repository.
1305
1297
1306 Operates on the filesystem.
1298 Operates on the filesystem.
1307 """)
1299 """)
1308
1300
1309 nofsauditor = interfaceutil.Attribute(
1301 nofsauditor = interfaceutil.Attribute(
1310 """A pathauditor for the working directory.
1302 """A pathauditor for the working directory.
1311
1303
1312 This is like ``auditor`` except it doesn't do filesystem checks.
1304 This is like ``auditor`` except it doesn't do filesystem checks.
1313 """)
1305 """)
1314
1306
1315 baseui = interfaceutil.Attribute(
1307 baseui = interfaceutil.Attribute(
1316 """Original ui instance passed into constructor.""")
1308 """Original ui instance passed into constructor.""")
1317
1309
1318 ui = interfaceutil.Attribute(
1310 ui = interfaceutil.Attribute(
1319 """Main ui instance for this instance.""")
1311 """Main ui instance for this instance.""")
1320
1312
1321 sharedpath = interfaceutil.Attribute(
1313 sharedpath = interfaceutil.Attribute(
1322 """Path to the .hg directory of the repo this repo was shared from.""")
1314 """Path to the .hg directory of the repo this repo was shared from.""")
1323
1315
1324 store = interfaceutil.Attribute(
1316 store = interfaceutil.Attribute(
1325 """A store instance.""")
1317 """A store instance.""")
1326
1318
1327 spath = interfaceutil.Attribute(
1319 spath = interfaceutil.Attribute(
1328 """Path to the store.""")
1320 """Path to the store.""")
1329
1321
1330 sjoin = interfaceutil.Attribute(
1322 sjoin = interfaceutil.Attribute(
1331 """Alias to self.store.join.""")
1323 """Alias to self.store.join.""")
1332
1324
1333 cachevfs = interfaceutil.Attribute(
1325 cachevfs = interfaceutil.Attribute(
1334 """A VFS used to access the cache directory.
1326 """A VFS used to access the cache directory.
1335
1327
1336 Typically .hg/cache.
1328 Typically .hg/cache.
1337 """)
1329 """)
1338
1330
1339 filteredrevcache = interfaceutil.Attribute(
1331 filteredrevcache = interfaceutil.Attribute(
1340 """Holds sets of revisions to be filtered.""")
1332 """Holds sets of revisions to be filtered.""")
1341
1333
1342 names = interfaceutil.Attribute(
1334 names = interfaceutil.Attribute(
1343 """A ``namespaces`` instance.""")
1335 """A ``namespaces`` instance.""")
1344
1336
1345 def close():
1337 def close():
1346 """Close the handle on this repository."""
1338 """Close the handle on this repository."""
1347
1339
1348 def peer():
1340 def peer():
1349 """Obtain an object conforming to the ``peer`` interface."""
1341 """Obtain an object conforming to the ``peer`` interface."""
1350
1342
1351 def unfiltered():
1343 def unfiltered():
1352 """Obtain an unfiltered/raw view of this repo."""
1344 """Obtain an unfiltered/raw view of this repo."""
1353
1345
1354 def filtered(name, visibilityexceptions=None):
1346 def filtered(name, visibilityexceptions=None):
1355 """Obtain a named view of this repository."""
1347 """Obtain a named view of this repository."""
1356
1348
1357 obsstore = interfaceutil.Attribute(
1349 obsstore = interfaceutil.Attribute(
1358 """A store of obsolescence data.""")
1350 """A store of obsolescence data.""")
1359
1351
1360 changelog = interfaceutil.Attribute(
1352 changelog = interfaceutil.Attribute(
1361 """A handle on the changelog revlog.""")
1353 """A handle on the changelog revlog.""")
1362
1354
1363 manifestlog = interfaceutil.Attribute(
1355 manifestlog = interfaceutil.Attribute(
1364 """An instance conforming to the ``imanifestlog`` interface.
1356 """An instance conforming to the ``imanifestlog`` interface.
1365
1357
1366 Provides access to manifests for the repository.
1358 Provides access to manifests for the repository.
1367 """)
1359 """)
1368
1360
1369 dirstate = interfaceutil.Attribute(
1361 dirstate = interfaceutil.Attribute(
1370 """Working directory state.""")
1362 """Working directory state.""")
1371
1363
1372 narrowpats = interfaceutil.Attribute(
1364 narrowpats = interfaceutil.Attribute(
1373 """Matcher patterns for this repository's narrowspec.""")
1365 """Matcher patterns for this repository's narrowspec.""")
1374
1366
1375 def narrowmatch():
1367 def narrowmatch():
1376 """Obtain a matcher for the narrowspec."""
1368 """Obtain a matcher for the narrowspec."""
1377
1369
1378 def setnarrowpats(newincludes, newexcludes):
1370 def setnarrowpats(newincludes, newexcludes):
1379 """Define the narrowspec for this repository."""
1371 """Define the narrowspec for this repository."""
1380
1372
1381 def __getitem__(changeid):
1373 def __getitem__(changeid):
1382 """Try to resolve a changectx."""
1374 """Try to resolve a changectx."""
1383
1375
1384 def __contains__(changeid):
1376 def __contains__(changeid):
1385 """Whether a changeset exists."""
1377 """Whether a changeset exists."""
1386
1378
1387 def __nonzero__():
1379 def __nonzero__():
1388 """Always returns True."""
1380 """Always returns True."""
1389 return True
1381 return True
1390
1382
1391 __bool__ = __nonzero__
1383 __bool__ = __nonzero__
1392
1384
1393 def __len__():
1385 def __len__():
1394 """Returns the number of changesets in the repo."""
1386 """Returns the number of changesets in the repo."""
1395
1387
1396 def __iter__():
1388 def __iter__():
1397 """Iterate over revisions in the changelog."""
1389 """Iterate over revisions in the changelog."""
1398
1390
1399 def revs(expr, *args):
1391 def revs(expr, *args):
1400 """Evaluate a revset.
1392 """Evaluate a revset.
1401
1393
1402 Emits revisions.
1394 Emits revisions.
1403 """
1395 """
1404
1396
1405 def set(expr, *args):
1397 def set(expr, *args):
1406 """Evaluate a revset.
1398 """Evaluate a revset.
1407
1399
1408 Emits changectx instances.
1400 Emits changectx instances.
1409 """
1401 """
1410
1402
1411 def anyrevs(specs, user=False, localalias=None):
1403 def anyrevs(specs, user=False, localalias=None):
1412 """Find revisions matching one of the given revsets."""
1404 """Find revisions matching one of the given revsets."""
1413
1405
1414 def url():
1406 def url():
1415 """Returns a string representing the location of this repo."""
1407 """Returns a string representing the location of this repo."""
1416
1408
1417 def hook(name, throw=False, **args):
1409 def hook(name, throw=False, **args):
1418 """Call a hook."""
1410 """Call a hook."""
1419
1411
1420 def tags():
1412 def tags():
1421 """Return a mapping of tag to node."""
1413 """Return a mapping of tag to node."""
1422
1414
1423 def tagtype(tagname):
1415 def tagtype(tagname):
1424 """Return the type of a given tag."""
1416 """Return the type of a given tag."""
1425
1417
1426 def tagslist():
1418 def tagslist():
1427 """Return a list of tags ordered by revision."""
1419 """Return a list of tags ordered by revision."""
1428
1420
1429 def nodetags(node):
1421 def nodetags(node):
1430 """Return the tags associated with a node."""
1422 """Return the tags associated with a node."""
1431
1423
1432 def nodebookmarks(node):
1424 def nodebookmarks(node):
1433 """Return the list of bookmarks pointing to the specified node."""
1425 """Return the list of bookmarks pointing to the specified node."""
1434
1426
1435 def branchmap():
1427 def branchmap():
1436 """Return a mapping of branch to heads in that branch."""
1428 """Return a mapping of branch to heads in that branch."""
1437
1429
1438 def revbranchcache():
1430 def revbranchcache():
1439 pass
1431 pass
1440
1432
1441 def branchtip(branchtip, ignoremissing=False):
1433 def branchtip(branchtip, ignoremissing=False):
1442 """Return the tip node for a given branch."""
1434 """Return the tip node for a given branch."""
1443
1435
1444 def lookup(key):
1436 def lookup(key):
1445 """Resolve the node for a revision."""
1437 """Resolve the node for a revision."""
1446
1438
1447 def lookupbranch(key):
1439 def lookupbranch(key):
1448 """Look up the branch name of the given revision or branch name."""
1440 """Look up the branch name of the given revision or branch name."""
1449
1441
1450 def known(nodes):
1442 def known(nodes):
1451 """Determine whether a series of nodes is known.
1443 """Determine whether a series of nodes is known.
1452
1444
1453 Returns a list of bools.
1445 Returns a list of bools.
1454 """
1446 """
1455
1447
1456 def local():
1448 def local():
1457 """Whether the repository is local."""
1449 """Whether the repository is local."""
1458 return True
1450 return True
1459
1451
1460 def publishing():
1452 def publishing():
1461 """Whether the repository is a publishing repository."""
1453 """Whether the repository is a publishing repository."""
1462
1454
1463 def cancopy():
1455 def cancopy():
1464 pass
1456 pass
1465
1457
1466 def shared():
1458 def shared():
1467 """The type of shared repository or None."""
1459 """The type of shared repository or None."""
1468
1460
1469 def wjoin(f, *insidef):
1461 def wjoin(f, *insidef):
1470 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1462 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1471
1463
1472 def setparents(p1, p2):
1464 def setparents(p1, p2):
1473 """Set the parent nodes of the working directory."""
1465 """Set the parent nodes of the working directory."""
1474
1466
1475 def filectx(path, changeid=None, fileid=None):
1467 def filectx(path, changeid=None, fileid=None):
1476 """Obtain a filectx for the given file revision."""
1468 """Obtain a filectx for the given file revision."""
1477
1469
1478 def getcwd():
1470 def getcwd():
1479 """Obtain the current working directory from the dirstate."""
1471 """Obtain the current working directory from the dirstate."""
1480
1472
1481 def pathto(f, cwd=None):
1473 def pathto(f, cwd=None):
1482 """Obtain the relative path to a file."""
1474 """Obtain the relative path to a file."""
1483
1475
1484 def adddatafilter(name, fltr):
1476 def adddatafilter(name, fltr):
1485 pass
1477 pass
1486
1478
1487 def wread(filename):
1479 def wread(filename):
1488 """Read a file from wvfs, using data filters."""
1480 """Read a file from wvfs, using data filters."""
1489
1481
1490 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1482 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1491 """Write data to a file in the wvfs, using data filters."""
1483 """Write data to a file in the wvfs, using data filters."""
1492
1484
1493 def wwritedata(filename, data):
1485 def wwritedata(filename, data):
1494 """Resolve data for writing to the wvfs, using data filters."""
1486 """Resolve data for writing to the wvfs, using data filters."""
1495
1487
1496 def currenttransaction():
1488 def currenttransaction():
1497 """Obtain the current transaction instance or None."""
1489 """Obtain the current transaction instance or None."""
1498
1490
1499 def transaction(desc, report=None):
1491 def transaction(desc, report=None):
1500 """Open a new transaction to write to the repository."""
1492 """Open a new transaction to write to the repository."""
1501
1493
1502 def undofiles():
1494 def undofiles():
1503 """Returns a list of (vfs, path) for files to undo transactions."""
1495 """Returns a list of (vfs, path) for files to undo transactions."""
1504
1496
1505 def recover():
1497 def recover():
1506 """Roll back an interrupted transaction."""
1498 """Roll back an interrupted transaction."""
1507
1499
1508 def rollback(dryrun=False, force=False):
1500 def rollback(dryrun=False, force=False):
1509 """Undo the last transaction.
1501 """Undo the last transaction.
1510
1502
1511 DANGEROUS.
1503 DANGEROUS.
1512 """
1504 """
1513
1505
1514 def updatecaches(tr=None, full=False):
1506 def updatecaches(tr=None, full=False):
1515 """Warm repo caches."""
1507 """Warm repo caches."""
1516
1508
1517 def invalidatecaches():
1509 def invalidatecaches():
1518 """Invalidate cached data due to the repository mutating."""
1510 """Invalidate cached data due to the repository mutating."""
1519
1511
1520 def invalidatevolatilesets():
1512 def invalidatevolatilesets():
1521 pass
1513 pass
1522
1514
1523 def invalidatedirstate():
1515 def invalidatedirstate():
1524 """Invalidate the dirstate."""
1516 """Invalidate the dirstate."""
1525
1517
1526 def invalidate(clearfilecache=False):
1518 def invalidate(clearfilecache=False):
1527 pass
1519 pass
1528
1520
1529 def invalidateall():
1521 def invalidateall():
1530 pass
1522 pass
1531
1523
1532 def lock(wait=True):
1524 def lock(wait=True):
1533 """Lock the repository store and return a lock instance."""
1525 """Lock the repository store and return a lock instance."""
1534
1526
1535 def wlock(wait=True):
1527 def wlock(wait=True):
1536 """Lock the non-store parts of the repository."""
1528 """Lock the non-store parts of the repository."""
1537
1529
1538 def currentwlock():
1530 def currentwlock():
1539 """Return the wlock if it's held or None."""
1531 """Return the wlock if it's held or None."""
1540
1532
1541 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1533 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1542 pass
1534 pass
1543
1535
1544 def commit(text='', user=None, date=None, match=None, force=False,
1536 def commit(text='', user=None, date=None, match=None, force=False,
1545 editor=False, extra=None):
1537 editor=False, extra=None):
1546 """Add a new revision to the repository."""
1538 """Add a new revision to the repository."""
1547
1539
1548 def commitctx(ctx, error=False):
1540 def commitctx(ctx, error=False):
1549 """Commit a commitctx instance to the repository."""
1541 """Commit a commitctx instance to the repository."""
1550
1542
1551 def destroying():
1543 def destroying():
1552 """Inform the repository that nodes are about to be destroyed."""
1544 """Inform the repository that nodes are about to be destroyed."""
1553
1545
1554 def destroyed():
1546 def destroyed():
1555 """Inform the repository that nodes have been destroyed."""
1547 """Inform the repository that nodes have been destroyed."""
1556
1548
1557 def status(node1='.', node2=None, match=None, ignored=False,
1549 def status(node1='.', node2=None, match=None, ignored=False,
1558 clean=False, unknown=False, listsubrepos=False):
1550 clean=False, unknown=False, listsubrepos=False):
1559 """Convenience method to call repo[x].status()."""
1551 """Convenience method to call repo[x].status()."""
1560
1552
1561 def addpostdsstatus(ps):
1553 def addpostdsstatus(ps):
1562 pass
1554 pass
1563
1555
1564 def postdsstatus():
1556 def postdsstatus():
1565 pass
1557 pass
1566
1558
1567 def clearpostdsstatus():
1559 def clearpostdsstatus():
1568 pass
1560 pass
1569
1561
1570 def heads(start=None):
1562 def heads(start=None):
1571 """Obtain list of nodes that are DAG heads."""
1563 """Obtain list of nodes that are DAG heads."""
1572
1564
1573 def branchheads(branch=None, start=None, closed=False):
1565 def branchheads(branch=None, start=None, closed=False):
1574 pass
1566 pass
1575
1567
1576 def branches(nodes):
1568 def branches(nodes):
1577 pass
1569 pass
1578
1570
1579 def between(pairs):
1571 def between(pairs):
1580 pass
1572 pass
1581
1573
1582 def checkpush(pushop):
1574 def checkpush(pushop):
1583 pass
1575 pass
1584
1576
1585 prepushoutgoinghooks = interfaceutil.Attribute(
1577 prepushoutgoinghooks = interfaceutil.Attribute(
1586 """util.hooks instance.""")
1578 """util.hooks instance.""")
1587
1579
1588 def pushkey(namespace, key, old, new):
1580 def pushkey(namespace, key, old, new):
1589 pass
1581 pass
1590
1582
1591 def listkeys(namespace):
1583 def listkeys(namespace):
1592 pass
1584 pass
1593
1585
1594 def debugwireargs(one, two, three=None, four=None, five=None):
1586 def debugwireargs(one, two, three=None, four=None, five=None):
1595 pass
1587 pass
1596
1588
1597 def savecommitmessage(text):
1589 def savecommitmessage(text):
1598 pass
1590 pass
1599
1591
1600 class completelocalrepository(ilocalrepositorymain,
1592 class completelocalrepository(ilocalrepositorymain,
1601 ilocalrepositoryfilestorage):
1593 ilocalrepositoryfilestorage):
1602 """Complete interface for a local repository."""
1594 """Complete interface for a local repository."""
@@ -1,984 +1,977
1 # storage.py - Testing of storage primitives.
1 # storage.py - Testing of storage primitives.
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import unittest
10 import unittest
11
11
12 from ..node import (
12 from ..node import (
13 hex,
13 hex,
14 nullid,
14 nullid,
15 nullrev,
15 nullrev,
16 )
16 )
17 from .. import (
17 from .. import (
18 error,
18 error,
19 mdiff,
19 mdiff,
20 revlog,
20 revlog,
21 )
21 )
22
22
23 class basetestcase(unittest.TestCase):
23 class basetestcase(unittest.TestCase):
24 if not getattr(unittest.TestCase, r'assertRaisesRegex', False):
24 if not getattr(unittest.TestCase, r'assertRaisesRegex', False):
25 assertRaisesRegex = (# camelcase-required
25 assertRaisesRegex = (# camelcase-required
26 unittest.TestCase.assertRaisesRegexp)
26 unittest.TestCase.assertRaisesRegexp)
27
27
28 class revisiondeltarequest(object):
28 class revisiondeltarequest(object):
29 def __init__(self, node, p1, p2, linknode, basenode, ellipsis):
29 def __init__(self, node, p1, p2, linknode, basenode, ellipsis):
30 self.node = node
30 self.node = node
31 self.p1node = p1
31 self.p1node = p1
32 self.p2node = p2
32 self.p2node = p2
33 self.linknode = linknode
33 self.linknode = linknode
34 self.basenode = basenode
34 self.basenode = basenode
35 self.ellipsis = ellipsis
35 self.ellipsis = ellipsis
36
36
37 class ifileindextests(basetestcase):
37 class ifileindextests(basetestcase):
38 """Generic tests for the ifileindex interface.
38 """Generic tests for the ifileindex interface.
39
39
40 All file storage backends for index data should conform to the tests in this
40 All file storage backends for index data should conform to the tests in this
41 class.
41 class.
42
42
43 Use ``makeifileindextests()`` to create an instance of this type.
43 Use ``makeifileindextests()`` to create an instance of this type.
44 """
44 """
45 def testempty(self):
45 def testempty(self):
46 f = self._makefilefn()
46 f = self._makefilefn()
47 self.assertEqual(len(f), 0, 'new file store has 0 length by default')
47 self.assertEqual(len(f), 0, 'new file store has 0 length by default')
48 self.assertEqual(list(f), [], 'iter yields nothing by default')
48 self.assertEqual(list(f), [], 'iter yields nothing by default')
49
49
50 gen = iter(f)
50 gen = iter(f)
51 with self.assertRaises(StopIteration):
51 with self.assertRaises(StopIteration):
52 next(gen)
52 next(gen)
53
53
54 # revs() should evaluate to an empty list.
54 # revs() should evaluate to an empty list.
55 self.assertEqual(list(f.revs()), [])
55 self.assertEqual(list(f.revs()), [])
56
56
57 revs = iter(f.revs())
57 revs = iter(f.revs())
58 with self.assertRaises(StopIteration):
58 with self.assertRaises(StopIteration):
59 next(revs)
59 next(revs)
60
60
61 self.assertEqual(list(f.revs(start=20)), [])
61 self.assertEqual(list(f.revs(start=20)), [])
62
62
63 # parents() and parentrevs() work with nullid/nullrev.
63 # parents() and parentrevs() work with nullid/nullrev.
64 self.assertEqual(f.parents(nullid), (nullid, nullid))
64 self.assertEqual(f.parents(nullid), (nullid, nullid))
65 self.assertEqual(f.parentrevs(nullrev), (nullrev, nullrev))
65 self.assertEqual(f.parentrevs(nullrev), (nullrev, nullrev))
66
66
67 with self.assertRaises(error.LookupError):
67 with self.assertRaises(error.LookupError):
68 f.parents(b'\x01' * 20)
68 f.parents(b'\x01' * 20)
69
69
70 for i in range(-5, 5):
70 for i in range(-5, 5):
71 if i == nullrev:
71 if i == nullrev:
72 continue
72 continue
73
73
74 with self.assertRaises(IndexError):
74 with self.assertRaises(IndexError):
75 f.parentrevs(i)
75 f.parentrevs(i)
76
76
77 # nullid/nullrev lookup always works.
77 # nullid/nullrev lookup always works.
78 self.assertEqual(f.rev(nullid), nullrev)
78 self.assertEqual(f.rev(nullid), nullrev)
79 self.assertEqual(f.node(nullrev), nullid)
79 self.assertEqual(f.node(nullrev), nullid)
80
80
81 with self.assertRaises(error.LookupError):
81 with self.assertRaises(error.LookupError):
82 f.rev(b'\x01' * 20)
82 f.rev(b'\x01' * 20)
83
83
84 for i in range(-5, 5):
84 for i in range(-5, 5):
85 if i == nullrev:
85 if i == nullrev:
86 continue
86 continue
87
87
88 with self.assertRaises(IndexError):
88 with self.assertRaises(IndexError):
89 f.node(i)
89 f.node(i)
90
90
91 self.assertEqual(f.lookup(nullid), nullid)
91 self.assertEqual(f.lookup(nullid), nullid)
92 self.assertEqual(f.lookup(nullrev), nullid)
92 self.assertEqual(f.lookup(nullrev), nullid)
93 self.assertEqual(f.lookup(hex(nullid)), nullid)
93 self.assertEqual(f.lookup(hex(nullid)), nullid)
94
94
95 # String converted to integer doesn't work for nullrev.
95 # String converted to integer doesn't work for nullrev.
96 with self.assertRaises(error.LookupError):
96 with self.assertRaises(error.LookupError):
97 f.lookup(b'%d' % nullrev)
97 f.lookup(b'%d' % nullrev)
98
98
99 self.assertEqual(f.linkrev(nullrev), nullrev)
99 self.assertEqual(f.linkrev(nullrev), nullrev)
100
100
101 for i in range(-5, 5):
101 for i in range(-5, 5):
102 if i == nullrev:
102 if i == nullrev:
103 continue
103 continue
104
104
105 with self.assertRaises(IndexError):
105 with self.assertRaises(IndexError):
106 f.linkrev(i)
106 f.linkrev(i)
107
107
108 self.assertEqual(f.flags(nullrev), 0)
108 self.assertEqual(f.flags(nullrev), 0)
109
109
110 for i in range(-5, 5):
110 for i in range(-5, 5):
111 if i == nullrev:
111 if i == nullrev:
112 continue
112 continue
113
113
114 with self.assertRaises(IndexError):
114 with self.assertRaises(IndexError):
115 f.flags(i)
115 f.flags(i)
116
116
117 self.assertFalse(f.iscensored(nullrev))
117 self.assertFalse(f.iscensored(nullrev))
118
118
119 for i in range(-5, 5):
119 for i in range(-5, 5):
120 if i == nullrev:
120 if i == nullrev:
121 continue
121 continue
122
122
123 with self.assertRaises(IndexError):
123 with self.assertRaises(IndexError):
124 f.iscensored(i)
124 f.iscensored(i)
125
125
126 self.assertEqual(list(f.commonancestorsheads(nullid, nullid)), [])
126 self.assertEqual(list(f.commonancestorsheads(nullid, nullid)), [])
127
127
128 with self.assertRaises(ValueError):
128 with self.assertRaises(ValueError):
129 self.assertEqual(list(f.descendants([])), [])
129 self.assertEqual(list(f.descendants([])), [])
130
130
131 self.assertEqual(list(f.descendants([nullrev])), [])
131 self.assertEqual(list(f.descendants([nullrev])), [])
132
132
133 self.assertEqual(f.headrevs(), [nullrev])
134 self.assertEqual(f.heads(), [nullid])
133 self.assertEqual(f.heads(), [nullid])
135 self.assertEqual(f.heads(nullid), [nullid])
134 self.assertEqual(f.heads(nullid), [nullid])
136 self.assertEqual(f.heads(None, [nullid]), [nullid])
135 self.assertEqual(f.heads(None, [nullid]), [nullid])
137 self.assertEqual(f.heads(nullid, [nullid]), [nullid])
136 self.assertEqual(f.heads(nullid, [nullid]), [nullid])
138
137
139 self.assertEqual(f.children(nullid), [])
138 self.assertEqual(f.children(nullid), [])
140
139
141 with self.assertRaises(error.LookupError):
140 with self.assertRaises(error.LookupError):
142 f.children(b'\x01' * 20)
141 f.children(b'\x01' * 20)
143
142
144 self.assertEqual(f.deltaparent(nullrev), nullrev)
143 self.assertEqual(f.deltaparent(nullrev), nullrev)
145
144
146 for i in range(-5, 5):
145 for i in range(-5, 5):
147 if i == nullrev:
146 if i == nullrev:
148 continue
147 continue
149
148
150 with self.assertRaises(IndexError):
149 with self.assertRaises(IndexError):
151 f.deltaparent(i)
150 f.deltaparent(i)
152
151
153 def testsinglerevision(self):
152 def testsinglerevision(self):
154 f = self._makefilefn()
153 f = self._makefilefn()
155 with self._maketransactionfn() as tr:
154 with self._maketransactionfn() as tr:
156 node = f.add(b'initial', None, tr, 0, nullid, nullid)
155 node = f.add(b'initial', None, tr, 0, nullid, nullid)
157
156
158 self.assertEqual(len(f), 1)
157 self.assertEqual(len(f), 1)
159 self.assertEqual(list(f), [0])
158 self.assertEqual(list(f), [0])
160
159
161 gen = iter(f)
160 gen = iter(f)
162 self.assertEqual(next(gen), 0)
161 self.assertEqual(next(gen), 0)
163
162
164 with self.assertRaises(StopIteration):
163 with self.assertRaises(StopIteration):
165 next(gen)
164 next(gen)
166
165
167 self.assertEqual(list(f.revs()), [0])
166 self.assertEqual(list(f.revs()), [0])
168 self.assertEqual(list(f.revs(start=1)), [])
167 self.assertEqual(list(f.revs(start=1)), [])
169 self.assertEqual(list(f.revs(start=0)), [0])
168 self.assertEqual(list(f.revs(start=0)), [0])
170 self.assertEqual(list(f.revs(stop=0)), [0])
169 self.assertEqual(list(f.revs(stop=0)), [0])
171 self.assertEqual(list(f.revs(stop=1)), [0])
170 self.assertEqual(list(f.revs(stop=1)), [0])
172 self.assertEqual(list(f.revs(1, 1)), [])
171 self.assertEqual(list(f.revs(1, 1)), [])
173 # TODO buggy
172 # TODO buggy
174 self.assertEqual(list(f.revs(1, 0)), [1, 0])
173 self.assertEqual(list(f.revs(1, 0)), [1, 0])
175 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
174 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
176
175
177 self.assertEqual(f.parents(node), (nullid, nullid))
176 self.assertEqual(f.parents(node), (nullid, nullid))
178 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
177 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
179
178
180 with self.assertRaises(error.LookupError):
179 with self.assertRaises(error.LookupError):
181 f.parents(b'\x01' * 20)
180 f.parents(b'\x01' * 20)
182
181
183 with self.assertRaises(IndexError):
182 with self.assertRaises(IndexError):
184 f.parentrevs(1)
183 f.parentrevs(1)
185
184
186 self.assertEqual(f.rev(node), 0)
185 self.assertEqual(f.rev(node), 0)
187
186
188 with self.assertRaises(error.LookupError):
187 with self.assertRaises(error.LookupError):
189 f.rev(b'\x01' * 20)
188 f.rev(b'\x01' * 20)
190
189
191 self.assertEqual(f.node(0), node)
190 self.assertEqual(f.node(0), node)
192
191
193 with self.assertRaises(IndexError):
192 with self.assertRaises(IndexError):
194 f.node(1)
193 f.node(1)
195
194
196 self.assertEqual(f.lookup(node), node)
195 self.assertEqual(f.lookup(node), node)
197 self.assertEqual(f.lookup(0), node)
196 self.assertEqual(f.lookup(0), node)
198 self.assertEqual(f.lookup(b'0'), node)
197 self.assertEqual(f.lookup(b'0'), node)
199 self.assertEqual(f.lookup(hex(node)), node)
198 self.assertEqual(f.lookup(hex(node)), node)
200
199
201 self.assertEqual(f.linkrev(0), 0)
200 self.assertEqual(f.linkrev(0), 0)
202
201
203 with self.assertRaises(IndexError):
202 with self.assertRaises(IndexError):
204 f.linkrev(1)
203 f.linkrev(1)
205
204
206 self.assertEqual(f.flags(0), 0)
205 self.assertEqual(f.flags(0), 0)
207
206
208 with self.assertRaises(IndexError):
207 with self.assertRaises(IndexError):
209 f.flags(1)
208 f.flags(1)
210
209
211 self.assertFalse(f.iscensored(0))
210 self.assertFalse(f.iscensored(0))
212
211
213 with self.assertRaises(IndexError):
212 with self.assertRaises(IndexError):
214 f.iscensored(1)
213 f.iscensored(1)
215
214
216 self.assertEqual(list(f.descendants([0])), [])
215 self.assertEqual(list(f.descendants([0])), [])
217
216
218 self.assertEqual(f.headrevs(), [0])
219
220 self.assertEqual(f.heads(), [node])
217 self.assertEqual(f.heads(), [node])
221 self.assertEqual(f.heads(node), [node])
218 self.assertEqual(f.heads(node), [node])
222 self.assertEqual(f.heads(stop=[node]), [node])
219 self.assertEqual(f.heads(stop=[node]), [node])
223
220
224 with self.assertRaises(error.LookupError):
221 with self.assertRaises(error.LookupError):
225 f.heads(stop=[b'\x01' * 20])
222 f.heads(stop=[b'\x01' * 20])
226
223
227 self.assertEqual(f.children(node), [])
224 self.assertEqual(f.children(node), [])
228
225
229 self.assertEqual(f.deltaparent(0), nullrev)
226 self.assertEqual(f.deltaparent(0), nullrev)
230
227
231 def testmultiplerevisions(self):
228 def testmultiplerevisions(self):
232 fulltext0 = b'x' * 1024
229 fulltext0 = b'x' * 1024
233 fulltext1 = fulltext0 + b'y'
230 fulltext1 = fulltext0 + b'y'
234 fulltext2 = b'y' + fulltext0 + b'z'
231 fulltext2 = b'y' + fulltext0 + b'z'
235
232
236 f = self._makefilefn()
233 f = self._makefilefn()
237 with self._maketransactionfn() as tr:
234 with self._maketransactionfn() as tr:
238 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
235 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
239 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
236 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
240 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
237 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
241
238
242 self.assertEqual(len(f), 3)
239 self.assertEqual(len(f), 3)
243 self.assertEqual(list(f), [0, 1, 2])
240 self.assertEqual(list(f), [0, 1, 2])
244
241
245 gen = iter(f)
242 gen = iter(f)
246 self.assertEqual(next(gen), 0)
243 self.assertEqual(next(gen), 0)
247 self.assertEqual(next(gen), 1)
244 self.assertEqual(next(gen), 1)
248 self.assertEqual(next(gen), 2)
245 self.assertEqual(next(gen), 2)
249
246
250 with self.assertRaises(StopIteration):
247 with self.assertRaises(StopIteration):
251 next(gen)
248 next(gen)
252
249
253 self.assertEqual(list(f.revs()), [0, 1, 2])
250 self.assertEqual(list(f.revs()), [0, 1, 2])
254 self.assertEqual(list(f.revs(0)), [0, 1, 2])
251 self.assertEqual(list(f.revs(0)), [0, 1, 2])
255 self.assertEqual(list(f.revs(1)), [1, 2])
252 self.assertEqual(list(f.revs(1)), [1, 2])
256 self.assertEqual(list(f.revs(2)), [2])
253 self.assertEqual(list(f.revs(2)), [2])
257 self.assertEqual(list(f.revs(3)), [])
254 self.assertEqual(list(f.revs(3)), [])
258 self.assertEqual(list(f.revs(stop=1)), [0, 1])
255 self.assertEqual(list(f.revs(stop=1)), [0, 1])
259 self.assertEqual(list(f.revs(stop=2)), [0, 1, 2])
256 self.assertEqual(list(f.revs(stop=2)), [0, 1, 2])
260 self.assertEqual(list(f.revs(stop=3)), [0, 1, 2])
257 self.assertEqual(list(f.revs(stop=3)), [0, 1, 2])
261 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
258 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
262 self.assertEqual(list(f.revs(2, 1)), [2, 1])
259 self.assertEqual(list(f.revs(2, 1)), [2, 1])
263 # TODO this is wrong
260 # TODO this is wrong
264 self.assertEqual(list(f.revs(3, 2)), [3, 2])
261 self.assertEqual(list(f.revs(3, 2)), [3, 2])
265
262
266 self.assertEqual(f.parents(node0), (nullid, nullid))
263 self.assertEqual(f.parents(node0), (nullid, nullid))
267 self.assertEqual(f.parents(node1), (node0, nullid))
264 self.assertEqual(f.parents(node1), (node0, nullid))
268 self.assertEqual(f.parents(node2), (node1, nullid))
265 self.assertEqual(f.parents(node2), (node1, nullid))
269
266
270 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
267 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
271 self.assertEqual(f.parentrevs(1), (0, nullrev))
268 self.assertEqual(f.parentrevs(1), (0, nullrev))
272 self.assertEqual(f.parentrevs(2), (1, nullrev))
269 self.assertEqual(f.parentrevs(2), (1, nullrev))
273
270
274 self.assertEqual(f.rev(node0), 0)
271 self.assertEqual(f.rev(node0), 0)
275 self.assertEqual(f.rev(node1), 1)
272 self.assertEqual(f.rev(node1), 1)
276 self.assertEqual(f.rev(node2), 2)
273 self.assertEqual(f.rev(node2), 2)
277
274
278 with self.assertRaises(error.LookupError):
275 with self.assertRaises(error.LookupError):
279 f.rev(b'\x01' * 20)
276 f.rev(b'\x01' * 20)
280
277
281 self.assertEqual(f.node(0), node0)
278 self.assertEqual(f.node(0), node0)
282 self.assertEqual(f.node(1), node1)
279 self.assertEqual(f.node(1), node1)
283 self.assertEqual(f.node(2), node2)
280 self.assertEqual(f.node(2), node2)
284
281
285 with self.assertRaises(IndexError):
282 with self.assertRaises(IndexError):
286 f.node(3)
283 f.node(3)
287
284
288 self.assertEqual(f.lookup(node0), node0)
285 self.assertEqual(f.lookup(node0), node0)
289 self.assertEqual(f.lookup(0), node0)
286 self.assertEqual(f.lookup(0), node0)
290 self.assertEqual(f.lookup(b'0'), node0)
287 self.assertEqual(f.lookup(b'0'), node0)
291 self.assertEqual(f.lookup(hex(node0)), node0)
288 self.assertEqual(f.lookup(hex(node0)), node0)
292
289
293 self.assertEqual(f.lookup(node1), node1)
290 self.assertEqual(f.lookup(node1), node1)
294 self.assertEqual(f.lookup(1), node1)
291 self.assertEqual(f.lookup(1), node1)
295 self.assertEqual(f.lookup(b'1'), node1)
292 self.assertEqual(f.lookup(b'1'), node1)
296 self.assertEqual(f.lookup(hex(node1)), node1)
293 self.assertEqual(f.lookup(hex(node1)), node1)
297
294
298 self.assertEqual(f.linkrev(0), 0)
295 self.assertEqual(f.linkrev(0), 0)
299 self.assertEqual(f.linkrev(1), 1)
296 self.assertEqual(f.linkrev(1), 1)
300 self.assertEqual(f.linkrev(2), 3)
297 self.assertEqual(f.linkrev(2), 3)
301
298
302 with self.assertRaises(IndexError):
299 with self.assertRaises(IndexError):
303 f.linkrev(3)
300 f.linkrev(3)
304
301
305 self.assertEqual(f.flags(0), 0)
302 self.assertEqual(f.flags(0), 0)
306 self.assertEqual(f.flags(1), 0)
303 self.assertEqual(f.flags(1), 0)
307 self.assertEqual(f.flags(2), 0)
304 self.assertEqual(f.flags(2), 0)
308
305
309 with self.assertRaises(IndexError):
306 with self.assertRaises(IndexError):
310 f.flags(3)
307 f.flags(3)
311
308
312 self.assertFalse(f.iscensored(0))
309 self.assertFalse(f.iscensored(0))
313 self.assertFalse(f.iscensored(1))
310 self.assertFalse(f.iscensored(1))
314 self.assertFalse(f.iscensored(2))
311 self.assertFalse(f.iscensored(2))
315
312
316 with self.assertRaises(IndexError):
313 with self.assertRaises(IndexError):
317 f.iscensored(3)
314 f.iscensored(3)
318
315
319 self.assertEqual(f.commonancestorsheads(node1, nullid), [])
316 self.assertEqual(f.commonancestorsheads(node1, nullid), [])
320 self.assertEqual(f.commonancestorsheads(node1, node0), [node0])
317 self.assertEqual(f.commonancestorsheads(node1, node0), [node0])
321 self.assertEqual(f.commonancestorsheads(node1, node1), [node1])
318 self.assertEqual(f.commonancestorsheads(node1, node1), [node1])
322 self.assertEqual(f.commonancestorsheads(node0, node1), [node0])
319 self.assertEqual(f.commonancestorsheads(node0, node1), [node0])
323 self.assertEqual(f.commonancestorsheads(node1, node2), [node1])
320 self.assertEqual(f.commonancestorsheads(node1, node2), [node1])
324 self.assertEqual(f.commonancestorsheads(node2, node1), [node1])
321 self.assertEqual(f.commonancestorsheads(node2, node1), [node1])
325
322
326 self.assertEqual(list(f.descendants([0])), [1, 2])
323 self.assertEqual(list(f.descendants([0])), [1, 2])
327 self.assertEqual(list(f.descendants([1])), [2])
324 self.assertEqual(list(f.descendants([1])), [2])
328 self.assertEqual(list(f.descendants([0, 1])), [1, 2])
325 self.assertEqual(list(f.descendants([0, 1])), [1, 2])
329
326
330 self.assertEqual(f.headrevs(), [2])
331
332 self.assertEqual(f.heads(), [node2])
327 self.assertEqual(f.heads(), [node2])
333 self.assertEqual(f.heads(node0), [node2])
328 self.assertEqual(f.heads(node0), [node2])
334 self.assertEqual(f.heads(node1), [node2])
329 self.assertEqual(f.heads(node1), [node2])
335 self.assertEqual(f.heads(node2), [node2])
330 self.assertEqual(f.heads(node2), [node2])
336
331
337 # TODO this behavior seems wonky. Is it correct? If so, the
332 # TODO this behavior seems wonky. Is it correct? If so, the
338 # docstring for heads() should be updated to reflect desired
333 # docstring for heads() should be updated to reflect desired
339 # behavior.
334 # behavior.
340 self.assertEqual(f.heads(stop=[node1]), [node1, node2])
335 self.assertEqual(f.heads(stop=[node1]), [node1, node2])
341 self.assertEqual(f.heads(stop=[node0]), [node0, node2])
336 self.assertEqual(f.heads(stop=[node0]), [node0, node2])
342 self.assertEqual(f.heads(stop=[node1, node2]), [node1, node2])
337 self.assertEqual(f.heads(stop=[node1, node2]), [node1, node2])
343
338
344 with self.assertRaises(error.LookupError):
339 with self.assertRaises(error.LookupError):
345 f.heads(stop=[b'\x01' * 20])
340 f.heads(stop=[b'\x01' * 20])
346
341
347 self.assertEqual(f.children(node0), [node1])
342 self.assertEqual(f.children(node0), [node1])
348 self.assertEqual(f.children(node1), [node2])
343 self.assertEqual(f.children(node1), [node2])
349 self.assertEqual(f.children(node2), [])
344 self.assertEqual(f.children(node2), [])
350
345
351 self.assertEqual(f.deltaparent(0), nullrev)
346 self.assertEqual(f.deltaparent(0), nullrev)
352 self.assertEqual(f.deltaparent(1), 0)
347 self.assertEqual(f.deltaparent(1), 0)
353 self.assertEqual(f.deltaparent(2), 1)
348 self.assertEqual(f.deltaparent(2), 1)
354
349
355 def testmultipleheads(self):
350 def testmultipleheads(self):
356 f = self._makefilefn()
351 f = self._makefilefn()
357
352
358 with self._maketransactionfn() as tr:
353 with self._maketransactionfn() as tr:
359 node0 = f.add(b'0', None, tr, 0, nullid, nullid)
354 node0 = f.add(b'0', None, tr, 0, nullid, nullid)
360 node1 = f.add(b'1', None, tr, 1, node0, nullid)
355 node1 = f.add(b'1', None, tr, 1, node0, nullid)
361 node2 = f.add(b'2', None, tr, 2, node1, nullid)
356 node2 = f.add(b'2', None, tr, 2, node1, nullid)
362 node3 = f.add(b'3', None, tr, 3, node0, nullid)
357 node3 = f.add(b'3', None, tr, 3, node0, nullid)
363 node4 = f.add(b'4', None, tr, 4, node3, nullid)
358 node4 = f.add(b'4', None, tr, 4, node3, nullid)
364 node5 = f.add(b'5', None, tr, 5, node0, nullid)
359 node5 = f.add(b'5', None, tr, 5, node0, nullid)
365
360
366 self.assertEqual(len(f), 6)
361 self.assertEqual(len(f), 6)
367
362
368 self.assertEqual(list(f.descendants([0])), [1, 2, 3, 4, 5])
363 self.assertEqual(list(f.descendants([0])), [1, 2, 3, 4, 5])
369 self.assertEqual(list(f.descendants([1])), [2])
364 self.assertEqual(list(f.descendants([1])), [2])
370 self.assertEqual(list(f.descendants([2])), [])
365 self.assertEqual(list(f.descendants([2])), [])
371 self.assertEqual(list(f.descendants([3])), [4])
366 self.assertEqual(list(f.descendants([3])), [4])
372 self.assertEqual(list(f.descendants([0, 1])), [1, 2, 3, 4, 5])
367 self.assertEqual(list(f.descendants([0, 1])), [1, 2, 3, 4, 5])
373 self.assertEqual(list(f.descendants([1, 3])), [2, 4])
368 self.assertEqual(list(f.descendants([1, 3])), [2, 4])
374
369
375 self.assertEqual(f.headrevs(), [2, 4, 5])
376
377 self.assertEqual(f.heads(), [node2, node4, node5])
370 self.assertEqual(f.heads(), [node2, node4, node5])
378 self.assertEqual(f.heads(node0), [node2, node4, node5])
371 self.assertEqual(f.heads(node0), [node2, node4, node5])
379 self.assertEqual(f.heads(node1), [node2])
372 self.assertEqual(f.heads(node1), [node2])
380 self.assertEqual(f.heads(node2), [node2])
373 self.assertEqual(f.heads(node2), [node2])
381 self.assertEqual(f.heads(node3), [node4])
374 self.assertEqual(f.heads(node3), [node4])
382 self.assertEqual(f.heads(node4), [node4])
375 self.assertEqual(f.heads(node4), [node4])
383 self.assertEqual(f.heads(node5), [node5])
376 self.assertEqual(f.heads(node5), [node5])
384
377
385 # TODO this seems wrong.
378 # TODO this seems wrong.
386 self.assertEqual(f.heads(stop=[node0]), [node0, node2, node4, node5])
379 self.assertEqual(f.heads(stop=[node0]), [node0, node2, node4, node5])
387 self.assertEqual(f.heads(stop=[node1]), [node1, node2, node4, node5])
380 self.assertEqual(f.heads(stop=[node1]), [node1, node2, node4, node5])
388
381
389 self.assertEqual(f.children(node0), [node1, node3, node5])
382 self.assertEqual(f.children(node0), [node1, node3, node5])
390 self.assertEqual(f.children(node1), [node2])
383 self.assertEqual(f.children(node1), [node2])
391 self.assertEqual(f.children(node2), [])
384 self.assertEqual(f.children(node2), [])
392 self.assertEqual(f.children(node3), [node4])
385 self.assertEqual(f.children(node3), [node4])
393 self.assertEqual(f.children(node4), [])
386 self.assertEqual(f.children(node4), [])
394 self.assertEqual(f.children(node5), [])
387 self.assertEqual(f.children(node5), [])
395
388
396 class ifiledatatests(basetestcase):
389 class ifiledatatests(basetestcase):
397 """Generic tests for the ifiledata interface.
390 """Generic tests for the ifiledata interface.
398
391
399 All file storage backends for data should conform to the tests in this
392 All file storage backends for data should conform to the tests in this
400 class.
393 class.
401
394
402 Use ``makeifiledatatests()`` to create an instance of this type.
395 Use ``makeifiledatatests()`` to create an instance of this type.
403 """
396 """
404 def testempty(self):
397 def testempty(self):
405 f = self._makefilefn()
398 f = self._makefilefn()
406
399
407 self.assertEqual(f.rawsize(nullrev), 0)
400 self.assertEqual(f.rawsize(nullrev), 0)
408
401
409 for i in range(-5, 5):
402 for i in range(-5, 5):
410 if i == nullrev:
403 if i == nullrev:
411 continue
404 continue
412
405
413 with self.assertRaises(IndexError):
406 with self.assertRaises(IndexError):
414 f.rawsize(i)
407 f.rawsize(i)
415
408
416 self.assertEqual(f.size(nullrev), 0)
409 self.assertEqual(f.size(nullrev), 0)
417
410
418 for i in range(-5, 5):
411 for i in range(-5, 5):
419 if i == nullrev:
412 if i == nullrev:
420 continue
413 continue
421
414
422 with self.assertRaises(IndexError):
415 with self.assertRaises(IndexError):
423 f.size(i)
416 f.size(i)
424
417
425 with self.assertRaises(error.StorageError):
418 with self.assertRaises(error.StorageError):
426 f.checkhash(b'', nullid)
419 f.checkhash(b'', nullid)
427
420
428 with self.assertRaises(error.LookupError):
421 with self.assertRaises(error.LookupError):
429 f.checkhash(b'', b'\x01' * 20)
422 f.checkhash(b'', b'\x01' * 20)
430
423
431 self.assertEqual(f.revision(nullid), b'')
424 self.assertEqual(f.revision(nullid), b'')
432 self.assertEqual(f.revision(nullid, raw=True), b'')
425 self.assertEqual(f.revision(nullid, raw=True), b'')
433
426
434 with self.assertRaises(error.LookupError):
427 with self.assertRaises(error.LookupError):
435 f.revision(b'\x01' * 20)
428 f.revision(b'\x01' * 20)
436
429
437 self.assertEqual(f.read(nullid), b'')
430 self.assertEqual(f.read(nullid), b'')
438
431
439 with self.assertRaises(error.LookupError):
432 with self.assertRaises(error.LookupError):
440 f.read(b'\x01' * 20)
433 f.read(b'\x01' * 20)
441
434
442 self.assertFalse(f.renamed(nullid))
435 self.assertFalse(f.renamed(nullid))
443
436
444 with self.assertRaises(error.LookupError):
437 with self.assertRaises(error.LookupError):
445 f.read(b'\x01' * 20)
438 f.read(b'\x01' * 20)
446
439
447 self.assertTrue(f.cmp(nullid, b''))
440 self.assertTrue(f.cmp(nullid, b''))
448 self.assertTrue(f.cmp(nullid, b'foo'))
441 self.assertTrue(f.cmp(nullid, b'foo'))
449
442
450 with self.assertRaises(error.LookupError):
443 with self.assertRaises(error.LookupError):
451 f.cmp(b'\x01' * 20, b'irrelevant')
444 f.cmp(b'\x01' * 20, b'irrelevant')
452
445
453 self.assertEqual(f.revdiff(nullrev, nullrev), b'')
446 self.assertEqual(f.revdiff(nullrev, nullrev), b'')
454
447
455 with self.assertRaises(IndexError):
448 with self.assertRaises(IndexError):
456 f.revdiff(0, nullrev)
449 f.revdiff(0, nullrev)
457
450
458 with self.assertRaises(IndexError):
451 with self.assertRaises(IndexError):
459 f.revdiff(nullrev, 0)
452 f.revdiff(nullrev, 0)
460
453
461 with self.assertRaises(IndexError):
454 with self.assertRaises(IndexError):
462 f.revdiff(0, 0)
455 f.revdiff(0, 0)
463
456
464 gen = f.emitrevisiondeltas([])
457 gen = f.emitrevisiondeltas([])
465 with self.assertRaises(StopIteration):
458 with self.assertRaises(StopIteration):
466 next(gen)
459 next(gen)
467
460
468 requests = [
461 requests = [
469 revisiondeltarequest(nullid, nullid, nullid, nullid, nullid, False),
462 revisiondeltarequest(nullid, nullid, nullid, nullid, nullid, False),
470 ]
463 ]
471 gen = f.emitrevisiondeltas(requests)
464 gen = f.emitrevisiondeltas(requests)
472
465
473 delta = next(gen)
466 delta = next(gen)
474
467
475 self.assertEqual(delta.node, nullid)
468 self.assertEqual(delta.node, nullid)
476 self.assertEqual(delta.p1node, nullid)
469 self.assertEqual(delta.p1node, nullid)
477 self.assertEqual(delta.p2node, nullid)
470 self.assertEqual(delta.p2node, nullid)
478 self.assertEqual(delta.linknode, nullid)
471 self.assertEqual(delta.linknode, nullid)
479 self.assertEqual(delta.basenode, nullid)
472 self.assertEqual(delta.basenode, nullid)
480 self.assertIsNone(delta.baserevisionsize)
473 self.assertIsNone(delta.baserevisionsize)
481 self.assertEqual(delta.revision, b'')
474 self.assertEqual(delta.revision, b'')
482 self.assertIsNone(delta.delta)
475 self.assertIsNone(delta.delta)
483
476
484 with self.assertRaises(StopIteration):
477 with self.assertRaises(StopIteration):
485 next(gen)
478 next(gen)
486
479
487 requests = [
480 requests = [
488 revisiondeltarequest(nullid, nullid, nullid, nullid, nullid, False),
481 revisiondeltarequest(nullid, nullid, nullid, nullid, nullid, False),
489 revisiondeltarequest(nullid, b'\x01' * 20, b'\x02' * 20,
482 revisiondeltarequest(nullid, b'\x01' * 20, b'\x02' * 20,
490 b'\x03' * 20, nullid, False)
483 b'\x03' * 20, nullid, False)
491 ]
484 ]
492
485
493 gen = f.emitrevisiondeltas(requests)
486 gen = f.emitrevisiondeltas(requests)
494
487
495 next(gen)
488 next(gen)
496 delta = next(gen)
489 delta = next(gen)
497
490
498 self.assertEqual(delta.node, nullid)
491 self.assertEqual(delta.node, nullid)
499 self.assertEqual(delta.p1node, b'\x01' * 20)
492 self.assertEqual(delta.p1node, b'\x01' * 20)
500 self.assertEqual(delta.p2node, b'\x02' * 20)
493 self.assertEqual(delta.p2node, b'\x02' * 20)
501 self.assertEqual(delta.linknode, b'\x03' * 20)
494 self.assertEqual(delta.linknode, b'\x03' * 20)
502 self.assertEqual(delta.basenode, nullid)
495 self.assertEqual(delta.basenode, nullid)
503 self.assertIsNone(delta.baserevisionsize)
496 self.assertIsNone(delta.baserevisionsize)
504 self.assertEqual(delta.revision, b'')
497 self.assertEqual(delta.revision, b'')
505 self.assertIsNone(delta.delta)
498 self.assertIsNone(delta.delta)
506
499
507 with self.assertRaises(StopIteration):
500 with self.assertRaises(StopIteration):
508 next(gen)
501 next(gen)
509
502
510 def testsinglerevision(self):
503 def testsinglerevision(self):
511 fulltext = b'initial'
504 fulltext = b'initial'
512
505
513 f = self._makefilefn()
506 f = self._makefilefn()
514 with self._maketransactionfn() as tr:
507 with self._maketransactionfn() as tr:
515 node = f.add(fulltext, None, tr, 0, nullid, nullid)
508 node = f.add(fulltext, None, tr, 0, nullid, nullid)
516
509
517 self.assertEqual(f.rawsize(0), len(fulltext))
510 self.assertEqual(f.rawsize(0), len(fulltext))
518
511
519 with self.assertRaises(IndexError):
512 with self.assertRaises(IndexError):
520 f.rawsize(1)
513 f.rawsize(1)
521
514
522 self.assertEqual(f.size(0), len(fulltext))
515 self.assertEqual(f.size(0), len(fulltext))
523
516
524 with self.assertRaises(IndexError):
517 with self.assertRaises(IndexError):
525 f.size(1)
518 f.size(1)
526
519
527 f.checkhash(fulltext, node)
520 f.checkhash(fulltext, node)
528 f.checkhash(fulltext, node, nullid, nullid)
521 f.checkhash(fulltext, node, nullid, nullid)
529
522
530 with self.assertRaises(error.StorageError):
523 with self.assertRaises(error.StorageError):
531 f.checkhash(fulltext + b'extra', node)
524 f.checkhash(fulltext + b'extra', node)
532
525
533 with self.assertRaises(error.StorageError):
526 with self.assertRaises(error.StorageError):
534 f.checkhash(fulltext, node, b'\x01' * 20, nullid)
527 f.checkhash(fulltext, node, b'\x01' * 20, nullid)
535
528
536 with self.assertRaises(error.StorageError):
529 with self.assertRaises(error.StorageError):
537 f.checkhash(fulltext, node, nullid, b'\x01' * 20)
530 f.checkhash(fulltext, node, nullid, b'\x01' * 20)
538
531
539 self.assertEqual(f.revision(node), fulltext)
532 self.assertEqual(f.revision(node), fulltext)
540 self.assertEqual(f.revision(node, raw=True), fulltext)
533 self.assertEqual(f.revision(node, raw=True), fulltext)
541
534
542 self.assertEqual(f.read(node), fulltext)
535 self.assertEqual(f.read(node), fulltext)
543
536
544 self.assertFalse(f.renamed(node))
537 self.assertFalse(f.renamed(node))
545
538
546 self.assertFalse(f.cmp(node, fulltext))
539 self.assertFalse(f.cmp(node, fulltext))
547 self.assertTrue(f.cmp(node, fulltext + b'extra'))
540 self.assertTrue(f.cmp(node, fulltext + b'extra'))
548
541
549 self.assertEqual(f.revdiff(0, 0), b'')
542 self.assertEqual(f.revdiff(0, 0), b'')
550 self.assertEqual(f.revdiff(nullrev, 0),
543 self.assertEqual(f.revdiff(nullrev, 0),
551 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07%s' %
544 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07%s' %
552 fulltext)
545 fulltext)
553
546
554 self.assertEqual(f.revdiff(0, nullrev),
547 self.assertEqual(f.revdiff(0, nullrev),
555 b'\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00')
548 b'\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00')
556
549
557 requests = [
550 requests = [
558 revisiondeltarequest(node, nullid, nullid, nullid, nullid, False),
551 revisiondeltarequest(node, nullid, nullid, nullid, nullid, False),
559 ]
552 ]
560 gen = f.emitrevisiondeltas(requests)
553 gen = f.emitrevisiondeltas(requests)
561
554
562 delta = next(gen)
555 delta = next(gen)
563
556
564 self.assertEqual(delta.node, node)
557 self.assertEqual(delta.node, node)
565 self.assertEqual(delta.p1node, nullid)
558 self.assertEqual(delta.p1node, nullid)
566 self.assertEqual(delta.p2node, nullid)
559 self.assertEqual(delta.p2node, nullid)
567 self.assertEqual(delta.linknode, nullid)
560 self.assertEqual(delta.linknode, nullid)
568 self.assertEqual(delta.basenode, nullid)
561 self.assertEqual(delta.basenode, nullid)
569 self.assertIsNone(delta.baserevisionsize)
562 self.assertIsNone(delta.baserevisionsize)
570 self.assertEqual(delta.revision, fulltext)
563 self.assertEqual(delta.revision, fulltext)
571 self.assertIsNone(delta.delta)
564 self.assertIsNone(delta.delta)
572
565
573 with self.assertRaises(StopIteration):
566 with self.assertRaises(StopIteration):
574 next(gen)
567 next(gen)
575
568
576 def testmultiplerevisions(self):
569 def testmultiplerevisions(self):
577 fulltext0 = b'x' * 1024
570 fulltext0 = b'x' * 1024
578 fulltext1 = fulltext0 + b'y'
571 fulltext1 = fulltext0 + b'y'
579 fulltext2 = b'y' + fulltext0 + b'z'
572 fulltext2 = b'y' + fulltext0 + b'z'
580
573
581 f = self._makefilefn()
574 f = self._makefilefn()
582 with self._maketransactionfn() as tr:
575 with self._maketransactionfn() as tr:
583 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
576 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
584 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
577 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
585 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
578 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
586
579
587 self.assertEqual(f.rawsize(0), len(fulltext0))
580 self.assertEqual(f.rawsize(0), len(fulltext0))
588 self.assertEqual(f.rawsize(1), len(fulltext1))
581 self.assertEqual(f.rawsize(1), len(fulltext1))
589 self.assertEqual(f.rawsize(2), len(fulltext2))
582 self.assertEqual(f.rawsize(2), len(fulltext2))
590
583
591 with self.assertRaises(IndexError):
584 with self.assertRaises(IndexError):
592 f.rawsize(3)
585 f.rawsize(3)
593
586
594 self.assertEqual(f.size(0), len(fulltext0))
587 self.assertEqual(f.size(0), len(fulltext0))
595 self.assertEqual(f.size(1), len(fulltext1))
588 self.assertEqual(f.size(1), len(fulltext1))
596 self.assertEqual(f.size(2), len(fulltext2))
589 self.assertEqual(f.size(2), len(fulltext2))
597
590
598 with self.assertRaises(IndexError):
591 with self.assertRaises(IndexError):
599 f.size(3)
592 f.size(3)
600
593
601 f.checkhash(fulltext0, node0)
594 f.checkhash(fulltext0, node0)
602 f.checkhash(fulltext1, node1)
595 f.checkhash(fulltext1, node1)
603 f.checkhash(fulltext1, node1, node0, nullid)
596 f.checkhash(fulltext1, node1, node0, nullid)
604 f.checkhash(fulltext2, node2, node1, nullid)
597 f.checkhash(fulltext2, node2, node1, nullid)
605
598
606 with self.assertRaises(error.StorageError):
599 with self.assertRaises(error.StorageError):
607 f.checkhash(fulltext1, b'\x01' * 20)
600 f.checkhash(fulltext1, b'\x01' * 20)
608
601
609 with self.assertRaises(error.StorageError):
602 with self.assertRaises(error.StorageError):
610 f.checkhash(fulltext1 + b'extra', node1, node0, nullid)
603 f.checkhash(fulltext1 + b'extra', node1, node0, nullid)
611
604
612 with self.assertRaises(error.StorageError):
605 with self.assertRaises(error.StorageError):
613 f.checkhash(fulltext1, node1, node0, node0)
606 f.checkhash(fulltext1, node1, node0, node0)
614
607
615 self.assertEqual(f.revision(node0), fulltext0)
608 self.assertEqual(f.revision(node0), fulltext0)
616 self.assertEqual(f.revision(node0, raw=True), fulltext0)
609 self.assertEqual(f.revision(node0, raw=True), fulltext0)
617 self.assertEqual(f.revision(node1), fulltext1)
610 self.assertEqual(f.revision(node1), fulltext1)
618 self.assertEqual(f.revision(node1, raw=True), fulltext1)
611 self.assertEqual(f.revision(node1, raw=True), fulltext1)
619 self.assertEqual(f.revision(node2), fulltext2)
612 self.assertEqual(f.revision(node2), fulltext2)
620 self.assertEqual(f.revision(node2, raw=True), fulltext2)
613 self.assertEqual(f.revision(node2, raw=True), fulltext2)
621
614
622 with self.assertRaises(error.LookupError):
615 with self.assertRaises(error.LookupError):
623 f.revision(b'\x01' * 20)
616 f.revision(b'\x01' * 20)
624
617
625 self.assertEqual(f.read(node0), fulltext0)
618 self.assertEqual(f.read(node0), fulltext0)
626 self.assertEqual(f.read(node1), fulltext1)
619 self.assertEqual(f.read(node1), fulltext1)
627 self.assertEqual(f.read(node2), fulltext2)
620 self.assertEqual(f.read(node2), fulltext2)
628
621
629 with self.assertRaises(error.LookupError):
622 with self.assertRaises(error.LookupError):
630 f.read(b'\x01' * 20)
623 f.read(b'\x01' * 20)
631
624
632 self.assertFalse(f.renamed(node0))
625 self.assertFalse(f.renamed(node0))
633 self.assertFalse(f.renamed(node1))
626 self.assertFalse(f.renamed(node1))
634 self.assertFalse(f.renamed(node2))
627 self.assertFalse(f.renamed(node2))
635
628
636 with self.assertRaises(error.LookupError):
629 with self.assertRaises(error.LookupError):
637 f.renamed(b'\x01' * 20)
630 f.renamed(b'\x01' * 20)
638
631
639 self.assertFalse(f.cmp(node0, fulltext0))
632 self.assertFalse(f.cmp(node0, fulltext0))
640 self.assertFalse(f.cmp(node1, fulltext1))
633 self.assertFalse(f.cmp(node1, fulltext1))
641 self.assertFalse(f.cmp(node2, fulltext2))
634 self.assertFalse(f.cmp(node2, fulltext2))
642
635
643 self.assertTrue(f.cmp(node1, fulltext0))
636 self.assertTrue(f.cmp(node1, fulltext0))
644 self.assertTrue(f.cmp(node2, fulltext1))
637 self.assertTrue(f.cmp(node2, fulltext1))
645
638
646 with self.assertRaises(error.LookupError):
639 with self.assertRaises(error.LookupError):
647 f.cmp(b'\x01' * 20, b'irrelevant')
640 f.cmp(b'\x01' * 20, b'irrelevant')
648
641
649 self.assertEqual(f.revdiff(0, 1),
642 self.assertEqual(f.revdiff(0, 1),
650 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
643 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
651 fulltext1)
644 fulltext1)
652
645
653 self.assertEqual(f.revdiff(0, 2),
646 self.assertEqual(f.revdiff(0, 2),
654 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x02' +
647 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x02' +
655 fulltext2)
648 fulltext2)
656
649
657 requests = [
650 requests = [
658 revisiondeltarequest(node0, nullid, nullid, b'\x01' * 20, nullid,
651 revisiondeltarequest(node0, nullid, nullid, b'\x01' * 20, nullid,
659 False),
652 False),
660 revisiondeltarequest(node1, node0, nullid, b'\x02' * 20, node0,
653 revisiondeltarequest(node1, node0, nullid, b'\x02' * 20, node0,
661 False),
654 False),
662 revisiondeltarequest(node2, node1, nullid, b'\x03' * 20, node1,
655 revisiondeltarequest(node2, node1, nullid, b'\x03' * 20, node1,
663 False),
656 False),
664 ]
657 ]
665 gen = f.emitrevisiondeltas(requests)
658 gen = f.emitrevisiondeltas(requests)
666
659
667 delta = next(gen)
660 delta = next(gen)
668
661
669 self.assertEqual(delta.node, node0)
662 self.assertEqual(delta.node, node0)
670 self.assertEqual(delta.p1node, nullid)
663 self.assertEqual(delta.p1node, nullid)
671 self.assertEqual(delta.p2node, nullid)
664 self.assertEqual(delta.p2node, nullid)
672 self.assertEqual(delta.linknode, b'\x01' * 20)
665 self.assertEqual(delta.linknode, b'\x01' * 20)
673 self.assertEqual(delta.basenode, nullid)
666 self.assertEqual(delta.basenode, nullid)
674 self.assertIsNone(delta.baserevisionsize)
667 self.assertIsNone(delta.baserevisionsize)
675 self.assertEqual(delta.revision, fulltext0)
668 self.assertEqual(delta.revision, fulltext0)
676 self.assertIsNone(delta.delta)
669 self.assertIsNone(delta.delta)
677
670
678 delta = next(gen)
671 delta = next(gen)
679
672
680 self.assertEqual(delta.node, node1)
673 self.assertEqual(delta.node, node1)
681 self.assertEqual(delta.p1node, node0)
674 self.assertEqual(delta.p1node, node0)
682 self.assertEqual(delta.p2node, nullid)
675 self.assertEqual(delta.p2node, nullid)
683 self.assertEqual(delta.linknode, b'\x02' * 20)
676 self.assertEqual(delta.linknode, b'\x02' * 20)
684 self.assertEqual(delta.basenode, node0)
677 self.assertEqual(delta.basenode, node0)
685 self.assertIsNone(delta.baserevisionsize)
678 self.assertIsNone(delta.baserevisionsize)
686 self.assertIsNone(delta.revision)
679 self.assertIsNone(delta.revision)
687 self.assertEqual(delta.delta,
680 self.assertEqual(delta.delta,
688 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
681 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
689 fulltext1)
682 fulltext1)
690
683
691 delta = next(gen)
684 delta = next(gen)
692
685
693 self.assertEqual(delta.node, node2)
686 self.assertEqual(delta.node, node2)
694 self.assertEqual(delta.p1node, node1)
687 self.assertEqual(delta.p1node, node1)
695 self.assertEqual(delta.p2node, nullid)
688 self.assertEqual(delta.p2node, nullid)
696 self.assertEqual(delta.linknode, b'\x03' * 20)
689 self.assertEqual(delta.linknode, b'\x03' * 20)
697 self.assertEqual(delta.basenode, node1)
690 self.assertEqual(delta.basenode, node1)
698 self.assertIsNone(delta.baserevisionsize)
691 self.assertIsNone(delta.baserevisionsize)
699 self.assertIsNone(delta.revision)
692 self.assertIsNone(delta.revision)
700 self.assertEqual(delta.delta,
693 self.assertEqual(delta.delta,
701 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
694 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
702 fulltext2)
695 fulltext2)
703
696
704 with self.assertRaises(StopIteration):
697 with self.assertRaises(StopIteration):
705 next(gen)
698 next(gen)
706
699
def testrenamed(self):
    """Copy metadata is exposed via renamed() and stripped by read()."""
    text0 = b'foo'
    text1 = b'bar'
    text2 = b'baz'

    copymeta1 = {
        b'copy': b'source0',
        b'copyrev': b'a' * 40,
    }

    copymeta2 = {
        b'copy': b'source1',
        b'copyrev': b'b' * 40,
    }

    # Raw stored form: \x01\n-delimited metadata header + fulltext.
    stored1 = (b'\x01\ncopy: source0\n' +
               b'copyrev: ' + b'a' * 40 + b'\n\x01\n' +
               text1)

    stored2 = (b'\x01\ncopy: source1\n' +
               b'copyrev: ' + b'b' * 40 + b'\n\x01\n' +
               text2)

    f = self._makefilefn()
    with self._maketransactionfn() as tr:
        node0 = f.add(text0, None, tr, 0, nullid, nullid)
        node1 = f.add(text1, copymeta1, tr, 1, node0, nullid)
        node2 = f.add(text2, copymeta2, tr, 2, nullid, nullid)

    # rawsize() reflects the stored form, metadata header included.
    self.assertEqual(f.rawsize(1), len(stored1))
    self.assertEqual(f.rawsize(2), len(stored2))

    # Metadata header isn't recognized when parent isn't nullid.
    self.assertEqual(f.size(1), len(stored1))
    self.assertEqual(f.size(2), len(text2))

    for node, stored in ((node1, stored1), (node2, stored2)):
        self.assertEqual(f.revision(node), stored)
        self.assertEqual(f.revision(node, raw=True), stored)

    self.assertEqual(f.read(node1), text1)
    self.assertEqual(f.read(node2), text2)

    # Returns False when first parent is set.
    self.assertFalse(f.renamed(node1))
    self.assertEqual(f.renamed(node2), (b'source1', b'\xbb' * 20))

    self.assertTrue(f.cmp(node1, text1))
    self.assertTrue(f.cmp(node1, stored1))
    self.assertFalse(f.cmp(node2, text2))
    self.assertTrue(f.cmp(node2, stored2))
763
756
def testmetadataprefix(self):
    """Fulltexts that begin with the metadata marker get escaped in storage."""
    # Content with metadata prefix has extra prefix inserted in storage.
    text0 = b'\x01\nfoo'
    stored0 = b'\x01\n\x01\n\x01\nfoo'

    text1 = b'\x01\nbar'
    copymeta = {
        b'copy': b'source0',
        b'copyrev': b'b' * 40,
    }
    stored1 = (b'\x01\ncopy: source0\n' +
               b'copyrev: %s\n' % (b'b' * 40) +
               b'\x01\n\x01\nbar')

    f = self._makefilefn()
    with self._maketransactionfn() as tr:
        node0 = f.add(text0, {}, tr, 0, nullid, nullid)
        node1 = f.add(text1, copymeta, tr, 1, nullid, nullid)

    self.assertEqual(f.rawsize(0), len(stored0))
    self.assertEqual(f.rawsize(1), len(stored1))

    # TODO this is buggy.
    self.assertEqual(f.size(0), len(text0) + 4)

    self.assertEqual(f.size(1), len(text1))

    self.assertEqual(f.revision(node0), stored0)
    self.assertEqual(f.revision(node0, raw=True), stored0)

    self.assertEqual(f.revision(node1), stored1)
    self.assertEqual(f.revision(node1, raw=True), stored1)

    self.assertEqual(f.read(node0), text0)
    self.assertEqual(f.read(node1), text1)

    self.assertFalse(f.cmp(node0, text0))
    self.assertTrue(f.cmp(node0, stored0))

    self.assertFalse(f.cmp(node1, text1))
    self.assertTrue(f.cmp(node1, stored0))
807
800
def testcensored(self):
    """Censored revisions expose the tombstone and read() as empty."""
    f = self._makefilefn()

    tombstone = revlog.packmeta({
        b'censored': b'tombstone',
    }, b'')

    # TODO tests are incomplete because we need the node to be
    # different due to presence of censor metadata. But we can't
    # do this with addrevision().
    with self._maketransactionfn() as tr:
        node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
        f.addrevision(tombstone, tr, 1, node0, nullid,
                      flags=revlog.REVIDX_ISCENSORED)

    self.assertEqual(f.flags(1), revlog.REVIDX_ISCENSORED)
    self.assertTrue(f.iscensored(1))

    self.assertEqual(f.revision(1), tombstone)
    self.assertEqual(f.revision(1, raw=True), tombstone)

    self.assertEqual(f.read(1), b'')
830
823
class ifilemutationtests(basetestcase):
    """Generic tests for the ifilemutation interface.

    All file storage backends that support writing should conform to this
    interface.

    Use ``makeifilemutationtests()`` to create an instance of this type.
    """
    def testaddnoop(self):
        store = self._makefilefn()
        with self._maketransactionfn() as tr:
            node0 = store.add(b'foo', None, tr, 0, nullid, nullid)
            node1 = store.add(b'foo', None, tr, 0, nullid, nullid)
            # Varying by linkrev shouldn't impact hash.
            node2 = store.add(b'foo', None, tr, 1, nullid, nullid)

        self.assertEqual(node1, node0)
        self.assertEqual(node2, node0)
        self.assertEqual(len(store), 1)

    def testaddrevisionbadnode(self):
        store = self._makefilefn()
        with self._maketransactionfn() as tr:
            # Adding a revision with bad node value fails.
            with self.assertRaises(error.StorageError):
                store.addrevision(b'foo', tr, 0, nullid, nullid,
                                  node=b'\x01' * 20)

    def testaddrevisionunknownflag(self):
        store = self._makefilefn()
        with self._maketransactionfn() as tr:
            # Find the highest flag bit not claimed by any known flag.
            for bit in range(15, 0, -1):
                if (1 << bit) & ~revlog.REVIDX_KNOWN_FLAGS:
                    flags = 1 << bit
                    break

            with self.assertRaises(error.StorageError):
                store.addrevision(b'foo', tr, 0, nullid, nullid, flags=flags)

    def testaddgroupsimple(self):
        store = self._makefilefn()

        seen = []
        def onrevision(*args, **kwargs):
            seen.append((args, kwargs))

        def linkmapper(node):
            return 0

        # An empty group is a no-op.
        with self._maketransactionfn() as tr:
            nodes = store.addgroup([], None, tr, addrevisioncb=onrevision)

        self.assertEqual(nodes, [])
        self.assertEqual(seen, [])
        self.assertEqual(len(store), 0)

        fulltext0 = b'foo'
        delta0 = mdiff.trivialdiffheader(len(fulltext0)) + fulltext0

        # A delta advertising a bogus node must be rejected.
        deltas = [
            (b'\x01' * 20, nullid, nullid, nullid, nullid, delta0, 0),
        ]

        with self._maketransactionfn() as tr:
            with self.assertRaises(error.StorageError):
                store.addgroup(deltas, linkmapper, tr,
                               addrevisioncb=onrevision)

            node0 = store.add(fulltext0, None, tr, 0, nullid, nullid)

        store = self._makefilefn()

        deltas = [
            (node0, nullid, nullid, nullid, nullid, delta0, 0),
        ]

        with self._maketransactionfn() as tr:
            nodes = store.addgroup(deltas, linkmapper, tr,
                                   addrevisioncb=onrevision)

        self.assertEqual(nodes, [
            b'\x49\xd8\xcb\xb1\x5c\xe2\x57\x92\x04\x47'
            b'\x00\x6b\x46\x97\x8b\x7a\xf9\x80\xa9\x79'])

        self.assertEqual(len(seen), 1)
        self.assertEqual(seen[0][0][1], nodes[0])

        self.assertEqual(list(store.revs()), [0])
        self.assertEqual(store.rev(nodes[0]), 0)
        self.assertEqual(store.node(0), nodes[0])

    def testaddgroupmultiple(self):
        store = self._makefilefn()

        fulltexts = [
            b'foo',
            b'bar',
            b'x' * 1024,
        ]

        nodes = []
        with self._maketransactionfn() as tr:
            for fulltext in fulltexts:
                nodes.append(store.add(fulltext, None, tr, 0,
                                       nullid, nullid))

        # Re-add the same content via addgroup() on a fresh store.
        store = self._makefilefn()
        deltas = []
        for i, fulltext in enumerate(fulltexts):
            delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext
            deltas.append((nodes[i], nullid, nullid, nullid, nullid,
                           delta, 0))

        with self._maketransactionfn() as tr:
            self.assertEqual(store.addgroup(deltas, lambda x: 0, tr), nodes)

        self.assertEqual(len(store), len(deltas))
        self.assertEqual(list(store.revs()), [0, 1, 2])
        for i in range(3):
            self.assertEqual(store.rev(nodes[i]), i)
            self.assertEqual(store.node(i), nodes[i])
951
944
def makeifileindextests(makefilefn, maketransactionfn):
    """Create a unittest.TestCase class suitable for testing file storage.

    ``makefilefn`` is a callable which receives the test case as an
    argument and returns an object implementing the ``ifilestorage``
    interface.

    ``maketransactionfn`` is a callable which receives the test case as an
    argument and returns a transaction object.

    Returns a type that is a ``unittest.TestCase`` that can be used for
    testing the object implementing the file storage interface. Simply
    assign the returned value to a module-level attribute and a test loader
    should find and run it automatically.
    """
    members = {
        r'_makefilefn': makefilefn,
        r'_maketransactionfn': maketransactionfn,
    }
    return type(r'ifileindextests', (ifileindextests,), members)
971
964
def makeifiledatatests(makefilefn, maketransactionfn):
    """Create a TestCase class for the ifiledata interface.

    Same factory contract as ``makeifileindextests()``.
    """
    members = {
        r'_makefilefn': makefilefn,
        r'_maketransactionfn': maketransactionfn,
    }
    return type(r'ifiledatatests', (ifiledatatests,), members)
978
971
def makeifilemutationtests(makefilefn, maketransactionfn):
    """Create a TestCase class for the ifilemutation interface.

    Same factory contract as ``makeifileindextests()``.
    """
    members = {
        r'_makefilefn': makefilefn,
        r'_maketransactionfn': maketransactionfn,
    }
    return type(r'ifilemutationtests', (ifilemutationtests,), members)
@@ -1,753 +1,741
1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 # To use this with the test suite:
8 # To use this with the test suite:
9 #
9 #
10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
12
12
13 from __future__ import absolute_import
13 from __future__ import absolute_import
14
14
15 import stat
15 import stat
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18 from mercurial.node import (
18 from mercurial.node import (
19 bin,
19 bin,
20 hex,
20 hex,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 )
23 )
24 from mercurial.thirdparty import (
24 from mercurial.thirdparty import (
25 attr,
25 attr,
26 cbor,
26 cbor,
27 )
27 )
28 from mercurial import (
28 from mercurial import (
29 ancestor,
29 ancestor,
30 bundlerepo,
30 bundlerepo,
31 error,
31 error,
32 extensions,
32 extensions,
33 localrepo,
33 localrepo,
34 mdiff,
34 mdiff,
35 pycompat,
35 pycompat,
36 repository,
36 repository,
37 revlog,
37 revlog,
38 store,
38 store,
39 verify,
39 verify,
40 )
40 )
41 from mercurial.utils import (
41 from mercurial.utils import (
42 interfaceutil,
42 interfaceutil,
43 )
43 )
44
44
45 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
45 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
46 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
46 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
47 # be specifying the version(s) of Mercurial they are tested with, or
47 # be specifying the version(s) of Mercurial they are tested with, or
48 # leave the attribute unspecified.
48 # leave the attribute unspecified.
49 testedwith = 'ships-with-hg-core'
49 testedwith = 'ships-with-hg-core'
50
50
51 REQUIREMENT = 'testonly-simplestore'
51 REQUIREMENT = 'testonly-simplestore'
52
52
def validatenode(node):
    """Sanity-check that ``node`` looks like a 20-byte binary node.

    Raises ValueError for ints (a common rev/node mixup) and for values
    of the wrong length.
    """
    if isinstance(node, int):
        raise ValueError('expected node; got int')

    if len(node) != 20:
        raise ValueError('expected 20 byte node')
59
59
def validaterev(rev):
    """Sanity-check that ``rev`` is an integer revision number."""
    if not isinstance(rev, int):
        raise ValueError('expected int')
63
63
class simplestoreerror(error.StorageError):
    """Storage error raised by the simple store backend."""
66
66
@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True, frozen=True)
class simplestorerevisiondelta(object):
    """Immutable value object describing one revision's delta.

    Attribute order matters: it defines the attrs-generated __init__
    signature and mirrors the irevisiondelta interface.
    """
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    linknode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
79
80 @interfaceutil.implementer(repository.ifilestorage)
80 @interfaceutil.implementer(repository.ifilestorage)
81 class filestorage(object):
81 class filestorage(object):
82 """Implements storage for a tracked path.
82 """Implements storage for a tracked path.
83
83
84 Data is stored in the VFS in a directory corresponding to the tracked
84 Data is stored in the VFS in a directory corresponding to the tracked
85 path.
85 path.
86
86
87 Index data is stored in an ``index`` file using CBOR.
87 Index data is stored in an ``index`` file using CBOR.
88
88
89 Fulltext data is stored in files having names of the node.
89 Fulltext data is stored in files having names of the node.
90 """
90 """
91
91
def __init__(self, svfs, path):
    """Initialize storage for tracked ``path`` backed by ``svfs``."""
    self._svfs = svfs
    self._path = path

    # Everything for this path lives under data/<path>/; the CBOR index
    # sits alongside the per-node fulltext files.
    self._storepath = b'/'.join([b'data', path])
    self._indexpath = b'/'.join([self._storepath, b'index'])

    raw = self._svfs.tryread(self._indexpath)
    if raw:
        raw = cbor.loads(raw)

    self._indexdata = raw or []
    self._indexbynode = {}
    self._indexbyrev = {}
    self.index = []
    self._refreshindex()

    # This is used by changegroup code :/
    self._generaldelta = True

    self.version = 1
113
113
def _refreshindex(self):
    """Rebuild node/rev lookup tables and revlog-style index tuples."""
    self._indexbynode.clear()
    self._indexbyrev.clear()
    self.index = []

    for rev, entry in enumerate(self._indexdata):
        self._indexbynode[entry[b'node']] = entry
        self._indexbyrev[rev] = entry

    # Synthesize entries for the null revision so lookups of nullid and
    # nullrev behave like a revlog.
    nullentry = {
        b'node': nullid,
        b'p1': nullid,
        b'p2': nullid,
        b'linkrev': nullrev,
        b'flags': 0,
    }
    self._indexbynode[nullid] = dict(nullentry)
    self._indexbyrev[nullrev] = dict(nullentry)

    for entry in self._indexdata:
        p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))

        # start, length, rawsize, chainbase, linkrev, p1, p2, node
        self.index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
                           entry[b'node']))

    self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
147
147
def __len__(self):
    """Number of real (non-null) revisions tracked."""
    return len(self._indexdata)
150
150
def __iter__(self):
    """Iterate revision numbers in ascending order."""
    return iter(range(len(self)))
153
153
def revs(self, start=0, stop=None):
    """Iterate revs from ``start`` through ``stop`` (inclusive).

    Walks backwards when ``start`` is greater than ``stop``; with no
    ``stop``, iterates to the end of the store.
    """
    if stop is None:
        return range(start, len(self))

    # Make ``stop`` inclusive, stepping backwards when needed.
    step = -1 if start > stop else 1
    return range(start, stop + step, step)
165
165
def parents(self, node):
    """Return the (p1, p2) nodes of ``node``.

    Raises KeyError for unknown nodes.
    """
    validatenode(node)

    try:
        entry = self._indexbynode[node]
    except KeyError:
        raise KeyError('unknown node')

    return entry[b'p1'], entry[b'p2']
175
175
def parentrevs(self, rev):
    """Return the parent revision numbers of ``rev``."""
    entry = self._indexbyrev[rev]
    p1node, p2node = self.parents(entry[b'node'])
    return self.rev(p1node), self.rev(p2node)
179
179
def rev(self, node):
    """Map a binary node to its revision number.

    Raises LookupError when the node is unknown.
    """
    validatenode(node)

    if node not in self._indexbynode:
        raise error.LookupError(node, self._indexpath, _('no node'))

    # O(n) scan; acceptable for the small histories this test store sees.
    for rev, entry in self._indexbyrev.items():
        if entry[b'node'] == node:
            return rev

    raise error.ProgrammingError('this should not occur')
193
193
def node(self, rev):
    """Map a revision number to its binary node."""
    validaterev(rev)

    return self._indexbyrev[rev][b'node']
198
198
def lookup(self, node):
    """Resolve ``node`` to a binary node.

    Accepts an integer rev, a 20-byte binary node, a decimal rev string
    (possibly negative, counting from the end), or a 40-char hex node.
    Raises LookupError when nothing matches.
    """
    if isinstance(node, int):
        return self.node(node)

    if len(node) == 20:
        # Binary node; validate it is known.
        self.rev(node)
        return node

    # Decimal revision number.
    try:
        rev = int(node)
        if '%d' % rev != node:
            raise ValueError

        if rev < 0:
            rev = len(self) + rev
        if rev < 0 or rev >= len(self):
            raise ValueError

        return self.node(rev)
    except (ValueError, OverflowError):
        pass

    # Hex node.
    if len(node) == 40:
        try:
            rawnode = bin(node)
            self.rev(rawnode)
            return rawnode
        except TypeError:
            pass

    raise error.LookupError(node, self._path, _('invalid lookup input'))
230
230
def linkrev(self, rev):
    """Return the changelog revision this file revision links to."""
    validaterev(rev)

    return self._indexbyrev[rev][b'linkrev']
235
235
def flags(self, rev):
    """Return the storage flags recorded for ``rev``."""
    validaterev(rev)

    return self._indexbyrev[rev][b'flags']
240
240
def deltaparent(self, rev):
    """Deltas are always reported against the first parent."""
    validaterev(rev)

    return self.rev(self.parents(self.node(rev))[0])
246
246
def _candelta(self, baserev, rev):
    """Whether a delta may be produced between two revisions.

    Deltas are unsafe when either revision carries flags that change
    the rawtext (e.g. censorship).
    """
    validaterev(baserev)
    validaterev(rev)

    changing = revlog.REVIDX_RAWTEXT_CHANGING_FLAGS
    if self.flags(baserev) & changing:
        return False
    if self.flags(rev) & changing:
        return False

    return True
256
256
def rawsize(self, rev):
    """Length in bytes of the raw stored text for ``rev``."""
    validaterev(rev)

    return len(self.revision(self.node(rev), raw=True))
261
261
262 def _processflags(self, text, flags, operation, raw=False):
262 def _processflags(self, text, flags, operation, raw=False):
263 if flags == 0:
263 if flags == 0:
264 return text, True
264 return text, True
265
265
266 if flags & ~revlog.REVIDX_KNOWN_FLAGS:
266 if flags & ~revlog.REVIDX_KNOWN_FLAGS:
267 raise simplestoreerror(_("incompatible revision flag '%#x'") %
267 raise simplestoreerror(_("incompatible revision flag '%#x'") %
268 (flags & ~revlog.REVIDX_KNOWN_FLAGS))
268 (flags & ~revlog.REVIDX_KNOWN_FLAGS))
269
269
270 validatehash = True
270 validatehash = True
271 # Depending on the operation (read or write), the order might be
271 # Depending on the operation (read or write), the order might be
272 # reversed due to non-commutative transforms.
272 # reversed due to non-commutative transforms.
273 orderedflags = revlog.REVIDX_FLAGS_ORDER
273 orderedflags = revlog.REVIDX_FLAGS_ORDER
274 if operation == 'write':
274 if operation == 'write':
275 orderedflags = reversed(orderedflags)
275 orderedflags = reversed(orderedflags)
276
276
277 for flag in orderedflags:
277 for flag in orderedflags:
278 # If a flagprocessor has been registered for a known flag, apply the
278 # If a flagprocessor has been registered for a known flag, apply the
279 # related operation transform and update result tuple.
279 # related operation transform and update result tuple.
280 if flag & flags:
280 if flag & flags:
281 vhash = True
281 vhash = True
282
282
283 if flag not in revlog._flagprocessors:
283 if flag not in revlog._flagprocessors:
284 message = _("missing processor for flag '%#x'") % (flag)
284 message = _("missing processor for flag '%#x'") % (flag)
285 raise simplestoreerror(message)
285 raise simplestoreerror(message)
286
286
287 processor = revlog._flagprocessors[flag]
287 processor = revlog._flagprocessors[flag]
288 if processor is not None:
288 if processor is not None:
289 readtransform, writetransform, rawtransform = processor
289 readtransform, writetransform, rawtransform = processor
290
290
291 if raw:
291 if raw:
292 vhash = rawtransform(self, text)
292 vhash = rawtransform(self, text)
293 elif operation == 'read':
293 elif operation == 'read':
294 text, vhash = readtransform(self, text)
294 text, vhash = readtransform(self, text)
295 else: # write operation
295 else: # write operation
296 text, vhash = writetransform(self, text)
296 text, vhash = writetransform(self, text)
297 validatehash = validatehash and vhash
297 validatehash = validatehash and vhash
298
298
299 return text, validatehash
299 return text, validatehash
300
300
def checkhash(self, text, node, p1=None, p2=None, rev=None):
    """Verify that ``text`` hashes to ``node``; raise on mismatch."""
    if p1 is None and p2 is None:
        p1, p2 = self.parents(node)
    if node != revlog.hash(text, p1, p2):
        raise simplestoreerror(_("integrity check failed on %s") %
                               self._path)
307
307
def revision(self, node, raw=False):
    """Return the (possibly flag-processed) fulltext for ``node``."""
    validatenode(node)

    if node == nullid:
        return b''

    rev = self.rev(node)
    flags = self.flags(rev)

    # Fulltexts are stored one file per node, named by the hex node.
    path = b'/'.join([self._storepath, hex(node)])
    rawtext = self._svfs.read(path)

    text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
    if validatehash:
        self.checkhash(text, node, rev=rev)

    return text
325
325
326 def read(self, node):
326 def read(self, node):
327 validatenode(node)
327 validatenode(node)
328
328
329 revision = self.revision(node)
329 revision = self.revision(node)
330
330
331 if not revision.startswith(b'\1\n'):
331 if not revision.startswith(b'\1\n'):
332 return revision
332 return revision
333
333
334 start = revision.index(b'\1\n', 2)
334 start = revision.index(b'\1\n', 2)
335 return revision[start + 2:]
335 return revision[start + 2:]
336
336
337 def renamed(self, node):
337 def renamed(self, node):
338 validatenode(node)
338 validatenode(node)
339
339
340 if self.parents(node)[0] != nullid:
340 if self.parents(node)[0] != nullid:
341 return False
341 return False
342
342
343 fulltext = self.revision(node)
343 fulltext = self.revision(node)
344 m = revlog.parsemeta(fulltext)[0]
344 m = revlog.parsemeta(fulltext)[0]
345
345
346 if m and 'copy' in m:
346 if m and 'copy' in m:
347 return m['copy'], bin(m['copyrev'])
347 return m['copy'], bin(m['copyrev'])
348
348
349 return False
349 return False
350
350
351 def cmp(self, node, text):
351 def cmp(self, node, text):
352 validatenode(node)
352 validatenode(node)
353
353
354 t = text
354 t = text
355
355
356 if text.startswith(b'\1\n'):
356 if text.startswith(b'\1\n'):
357 t = b'\1\n\1\n' + text
357 t = b'\1\n\1\n' + text
358
358
359 p1, p2 = self.parents(node)
359 p1, p2 = self.parents(node)
360
360
361 if revlog.hash(t, p1, p2) == node:
361 if revlog.hash(t, p1, p2) == node:
362 return False
362 return False
363
363
364 if self.iscensored(self.rev(node)):
364 if self.iscensored(self.rev(node)):
365 return text != b''
365 return text != b''
366
366
367 if self.renamed(node):
367 if self.renamed(node):
368 t2 = self.read(node)
368 t2 = self.read(node)
369 return t2 != text
369 return t2 != text
370
370
371 return True
371 return True
372
372
373 def size(self, rev):
373 def size(self, rev):
374 validaterev(rev)
374 validaterev(rev)
375
375
376 node = self._indexbyrev[rev][b'node']
376 node = self._indexbyrev[rev][b'node']
377
377
378 if self.renamed(node):
378 if self.renamed(node):
379 return len(self.read(node))
379 return len(self.read(node))
380
380
381 if self.iscensored(rev):
381 if self.iscensored(rev):
382 return 0
382 return 0
383
383
384 return len(self.revision(node))
384 return len(self.revision(node))
385
385
386 def iscensored(self, rev):
386 def iscensored(self, rev):
387 validaterev(rev)
387 validaterev(rev)
388
388
389 return self.flags(rev) & revlog.REVIDX_ISCENSORED
389 return self.flags(rev) & revlog.REVIDX_ISCENSORED
390
390
391 def commonancestorsheads(self, a, b):
391 def commonancestorsheads(self, a, b):
392 validatenode(a)
392 validatenode(a)
393 validatenode(b)
393 validatenode(b)
394
394
395 a = self.rev(a)
395 a = self.rev(a)
396 b = self.rev(b)
396 b = self.rev(b)
397
397
398 ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
398 ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
399 return pycompat.maplist(self.node, ancestors)
399 return pycompat.maplist(self.node, ancestors)
400
400
401 def descendants(self, revs):
401 def descendants(self, revs):
402 # This is a copy of revlog.descendants()
402 # This is a copy of revlog.descendants()
403 first = min(revs)
403 first = min(revs)
404 if first == nullrev:
404 if first == nullrev:
405 for i in self:
405 for i in self:
406 yield i
406 yield i
407 return
407 return
408
408
409 seen = set(revs)
409 seen = set(revs)
410 for i in self.revs(start=first + 1):
410 for i in self.revs(start=first + 1):
411 for x in self.parentrevs(i):
411 for x in self.parentrevs(i):
412 if x != nullrev and x in seen:
412 if x != nullrev and x in seen:
413 seen.add(i)
413 seen.add(i)
414 yield i
414 yield i
415 break
415 break
416
416
417 # Required by verify.
417 # Required by verify.
418 def files(self):
418 def files(self):
419 entries = self._svfs.listdir(self._storepath)
419 entries = self._svfs.listdir(self._storepath)
420
420
421 # Strip out undo.backup.* files created as part of transaction
421 # Strip out undo.backup.* files created as part of transaction
422 # recording.
422 # recording.
423 entries = [f for f in entries if not f.startswith('undo.backup.')]
423 entries = [f for f in entries if not f.startswith('undo.backup.')]
424
424
425 return [b'/'.join((self._storepath, f)) for f in entries]
425 return [b'/'.join((self._storepath, f)) for f in entries]
426
426
427 # Required by verify.
427 # Required by verify.
428 def checksize(self):
428 def checksize(self):
429 return 0, 0
429 return 0, 0
430
430
431 def add(self, text, meta, transaction, linkrev, p1, p2):
431 def add(self, text, meta, transaction, linkrev, p1, p2):
432 if meta or text.startswith(b'\1\n'):
432 if meta or text.startswith(b'\1\n'):
433 text = revlog.packmeta(meta, text)
433 text = revlog.packmeta(meta, text)
434
434
435 return self.addrevision(text, transaction, linkrev, p1, p2)
435 return self.addrevision(text, transaction, linkrev, p1, p2)
436
436
437 def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
437 def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
438 flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
438 flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
439 validatenode(p1)
439 validatenode(p1)
440 validatenode(p2)
440 validatenode(p2)
441
441
442 if flags:
442 if flags:
443 node = node or revlog.hash(text, p1, p2)
443 node = node or revlog.hash(text, p1, p2)
444
444
445 rawtext, validatehash = self._processflags(text, flags, 'write')
445 rawtext, validatehash = self._processflags(text, flags, 'write')
446
446
447 node = node or revlog.hash(text, p1, p2)
447 node = node or revlog.hash(text, p1, p2)
448
448
449 if node in self._indexbynode:
449 if node in self._indexbynode:
450 return node
450 return node
451
451
452 if validatehash:
452 if validatehash:
453 self.checkhash(rawtext, node, p1=p1, p2=p2)
453 self.checkhash(rawtext, node, p1=p1, p2=p2)
454
454
455 return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
455 return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
456 flags)
456 flags)
457
457
458 def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
458 def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
459 transaction.addbackup(self._indexpath)
459 transaction.addbackup(self._indexpath)
460
460
461 path = b'/'.join([self._storepath, hex(node)])
461 path = b'/'.join([self._storepath, hex(node)])
462
462
463 self._svfs.write(path, rawtext)
463 self._svfs.write(path, rawtext)
464
464
465 self._indexdata.append({
465 self._indexdata.append({
466 b'node': node,
466 b'node': node,
467 b'p1': p1,
467 b'p1': p1,
468 b'p2': p2,
468 b'p2': p2,
469 b'linkrev': link,
469 b'linkrev': link,
470 b'flags': flags,
470 b'flags': flags,
471 })
471 })
472
472
473 self._reflectindexupdate()
473 self._reflectindexupdate()
474
474
475 return node
475 return node
476
476
477 def _reflectindexupdate(self):
477 def _reflectindexupdate(self):
478 self._refreshindex()
478 self._refreshindex()
479 self._svfs.write(self._indexpath, cbor.dumps(self._indexdata))
479 self._svfs.write(self._indexpath, cbor.dumps(self._indexdata))
480
480
481 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
481 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
482 nodes = []
482 nodes = []
483
483
484 transaction.addbackup(self._indexpath)
484 transaction.addbackup(self._indexpath)
485
485
486 for node, p1, p2, linknode, deltabase, delta, flags in deltas:
486 for node, p1, p2, linknode, deltabase, delta, flags in deltas:
487 linkrev = linkmapper(linknode)
487 linkrev = linkmapper(linknode)
488 flags = flags or revlog.REVIDX_DEFAULT_FLAGS
488 flags = flags or revlog.REVIDX_DEFAULT_FLAGS
489
489
490 nodes.append(node)
490 nodes.append(node)
491
491
492 if node in self._indexbynode:
492 if node in self._indexbynode:
493 continue
493 continue
494
494
495 # Need to resolve the fulltext from the delta base.
495 # Need to resolve the fulltext from the delta base.
496 if deltabase == nullid:
496 if deltabase == nullid:
497 text = mdiff.patch(b'', delta)
497 text = mdiff.patch(b'', delta)
498 else:
498 else:
499 text = mdiff.patch(self.revision(deltabase), delta)
499 text = mdiff.patch(self.revision(deltabase), delta)
500
500
501 self._addrawrevision(node, text, transaction, linkrev, p1, p2,
501 self._addrawrevision(node, text, transaction, linkrev, p1, p2,
502 flags)
502 flags)
503
503
504 if addrevisioncb:
504 if addrevisioncb:
505 addrevisioncb(self, node)
505 addrevisioncb(self, node)
506
506
507 return nodes
507 return nodes
508
508
509 def revdiff(self, rev1, rev2):
509 def revdiff(self, rev1, rev2):
510 validaterev(rev1)
510 validaterev(rev1)
511 validaterev(rev2)
511 validaterev(rev2)
512
512
513 node1 = self.node(rev1)
513 node1 = self.node(rev1)
514 node2 = self.node(rev2)
514 node2 = self.node(rev2)
515
515
516 return mdiff.textdiff(self.revision(node1, raw=True),
516 return mdiff.textdiff(self.revision(node1, raw=True),
517 self.revision(node2, raw=True))
517 self.revision(node2, raw=True))
518
518
519 def emitrevisiondeltas(self, requests):
519 def emitrevisiondeltas(self, requests):
520 for request in requests:
520 for request in requests:
521 node = request.node
521 node = request.node
522 rev = self.rev(node)
522 rev = self.rev(node)
523
523
524 if request.basenode == nullid:
524 if request.basenode == nullid:
525 baserev = nullrev
525 baserev = nullrev
526 elif request.basenode is not None:
526 elif request.basenode is not None:
527 baserev = self.rev(request.basenode)
527 baserev = self.rev(request.basenode)
528 else:
528 else:
529 # This is a test extension and we can do simple things
529 # This is a test extension and we can do simple things
530 # for choosing a delta parent.
530 # for choosing a delta parent.
531 baserev = self.deltaparent(rev)
531 baserev = self.deltaparent(rev)
532
532
533 if baserev != nullrev and not self._candelta(baserev, rev):
533 if baserev != nullrev and not self._candelta(baserev, rev):
534 baserev = nullrev
534 baserev = nullrev
535
535
536 revision = None
536 revision = None
537 delta = None
537 delta = None
538 baserevisionsize = None
538 baserevisionsize = None
539
539
540 if self.iscensored(baserev) or self.iscensored(rev):
540 if self.iscensored(baserev) or self.iscensored(rev):
541 try:
541 try:
542 revision = self.revision(node, raw=True)
542 revision = self.revision(node, raw=True)
543 except error.CensoredNodeError as e:
543 except error.CensoredNodeError as e:
544 revision = e.tombstone
544 revision = e.tombstone
545
545
546 if baserev != nullrev:
546 if baserev != nullrev:
547 baserevisionsize = self.rawsize(baserev)
547 baserevisionsize = self.rawsize(baserev)
548
548
549 elif baserev == nullrev:
549 elif baserev == nullrev:
550 revision = self.revision(node, raw=True)
550 revision = self.revision(node, raw=True)
551 else:
551 else:
552 delta = self.revdiff(baserev, rev)
552 delta = self.revdiff(baserev, rev)
553
553
554 extraflags = revlog.REVIDX_ELLIPSIS if request.ellipsis else 0
554 extraflags = revlog.REVIDX_ELLIPSIS if request.ellipsis else 0
555
555
556 yield simplestorerevisiondelta(
556 yield simplestorerevisiondelta(
557 node=node,
557 node=node,
558 p1node=request.p1node,
558 p1node=request.p1node,
559 p2node=request.p2node,
559 p2node=request.p2node,
560 linknode=request.linknode,
560 linknode=request.linknode,
561 basenode=self.node(baserev),
561 basenode=self.node(baserev),
562 flags=self.flags(rev) | extraflags,
562 flags=self.flags(rev) | extraflags,
563 baserevisionsize=baserevisionsize,
563 baserevisionsize=baserevisionsize,
564 revision=revision,
564 revision=revision,
565 delta=delta)
565 delta=delta)
566
566
567 def headrevs(self):
568 # Assume all revisions are heads by default.
569 revishead = {rev: True for rev in self._indexbyrev}
570
571 for rev, entry in self._indexbyrev.items():
572 # Unset head flag for all seen parents.
573 revishead[self.rev(entry[b'p1'])] = False
574 revishead[self.rev(entry[b'p2'])] = False
575
576 return [rev for rev, ishead in sorted(revishead.items())
577 if ishead]
578
579 def heads(self, start=None, stop=None):
567 def heads(self, start=None, stop=None):
580 # This is copied from revlog.py.
568 # This is copied from revlog.py.
581 if start is None and stop is None:
569 if start is None and stop is None:
582 if not len(self):
570 if not len(self):
583 return [nullid]
571 return [nullid]
584 return [self.node(r) for r in self.headrevs()]
572 return [self.node(r) for r in self.headrevs()]
585
573
586 if start is None:
574 if start is None:
587 start = nullid
575 start = nullid
588 if stop is None:
576 if stop is None:
589 stop = []
577 stop = []
590 stoprevs = set([self.rev(n) for n in stop])
578 stoprevs = set([self.rev(n) for n in stop])
591 startrev = self.rev(start)
579 startrev = self.rev(start)
592 reachable = {startrev}
580 reachable = {startrev}
593 heads = {startrev}
581 heads = {startrev}
594
582
595 parentrevs = self.parentrevs
583 parentrevs = self.parentrevs
596 for r in self.revs(start=startrev + 1):
584 for r in self.revs(start=startrev + 1):
597 for p in parentrevs(r):
585 for p in parentrevs(r):
598 if p in reachable:
586 if p in reachable:
599 if r not in stoprevs:
587 if r not in stoprevs:
600 reachable.add(r)
588 reachable.add(r)
601 heads.add(r)
589 heads.add(r)
602 if p in heads and p not in stoprevs:
590 if p in heads and p not in stoprevs:
603 heads.remove(p)
591 heads.remove(p)
604
592
605 return [self.node(r) for r in heads]
593 return [self.node(r) for r in heads]
606
594
607 def children(self, node):
595 def children(self, node):
608 validatenode(node)
596 validatenode(node)
609
597
610 # This is a copy of revlog.children().
598 # This is a copy of revlog.children().
611 c = []
599 c = []
612 p = self.rev(node)
600 p = self.rev(node)
613 for r in self.revs(start=p + 1):
601 for r in self.revs(start=p + 1):
614 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
602 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
615 if prevs:
603 if prevs:
616 for pr in prevs:
604 for pr in prevs:
617 if pr == p:
605 if pr == p:
618 c.append(self.node(r))
606 c.append(self.node(r))
619 elif p == nullrev:
607 elif p == nullrev:
620 c.append(self.node(r))
608 c.append(self.node(r))
621 return c
609 return c
622
610
623 def getstrippoint(self, minlink):
611 def getstrippoint(self, minlink):
624
612
625 # This is largely a copy of revlog.getstrippoint().
613 # This is largely a copy of revlog.getstrippoint().
626 brokenrevs = set()
614 brokenrevs = set()
627 strippoint = len(self)
615 strippoint = len(self)
628
616
629 heads = {}
617 heads = {}
630 futurelargelinkrevs = set()
618 futurelargelinkrevs = set()
631 for head in self.headrevs():
619 for head in self.heads():
632 headlinkrev = self.linkrev(head)
620 headlinkrev = self.linkrev(self.rev(head))
633 heads[head] = headlinkrev
621 heads[head] = headlinkrev
634 if headlinkrev >= minlink:
622 if headlinkrev >= minlink:
635 futurelargelinkrevs.add(headlinkrev)
623 futurelargelinkrevs.add(headlinkrev)
636
624
637 # This algorithm involves walking down the rev graph, starting at the
625 # This algorithm involves walking down the rev graph, starting at the
638 # heads. Since the revs are topologically sorted according to linkrev,
626 # heads. Since the revs are topologically sorted according to linkrev,
639 # once all head linkrevs are below the minlink, we know there are
627 # once all head linkrevs are below the minlink, we know there are
640 # no more revs that could have a linkrev greater than minlink.
628 # no more revs that could have a linkrev greater than minlink.
641 # So we can stop walking.
629 # So we can stop walking.
642 while futurelargelinkrevs:
630 while futurelargelinkrevs:
643 strippoint -= 1
631 strippoint -= 1
644 linkrev = heads.pop(strippoint)
632 linkrev = heads.pop(strippoint)
645
633
646 if linkrev < minlink:
634 if linkrev < minlink:
647 brokenrevs.add(strippoint)
635 brokenrevs.add(strippoint)
648 else:
636 else:
649 futurelargelinkrevs.remove(linkrev)
637 futurelargelinkrevs.remove(linkrev)
650
638
651 for p in self.parentrevs(strippoint):
639 for p in self.parentrevs(strippoint):
652 if p != nullrev:
640 if p != nullrev:
653 plinkrev = self.linkrev(p)
641 plinkrev = self.linkrev(p)
654 heads[p] = plinkrev
642 heads[p] = plinkrev
655 if plinkrev >= minlink:
643 if plinkrev >= minlink:
656 futurelargelinkrevs.add(plinkrev)
644 futurelargelinkrevs.add(plinkrev)
657
645
658 return strippoint, brokenrevs
646 return strippoint, brokenrevs
659
647
660 def strip(self, minlink, transaction):
648 def strip(self, minlink, transaction):
661 if not len(self):
649 if not len(self):
662 return
650 return
663
651
664 rev, _ignored = self.getstrippoint(minlink)
652 rev, _ignored = self.getstrippoint(minlink)
665 if rev == len(self):
653 if rev == len(self):
666 return
654 return
667
655
668 # Purge index data starting at the requested revision.
656 # Purge index data starting at the requested revision.
669 self._indexdata[rev:] = []
657 self._indexdata[rev:] = []
670 self._reflectindexupdate()
658 self._reflectindexupdate()
671
659
672 def issimplestorefile(f, kind, st):
660 def issimplestorefile(f, kind, st):
673 if kind != stat.S_IFREG:
661 if kind != stat.S_IFREG:
674 return False
662 return False
675
663
676 if store.isrevlog(f, kind, st):
664 if store.isrevlog(f, kind, st):
677 return False
665 return False
678
666
679 # Ignore transaction undo files.
667 # Ignore transaction undo files.
680 if f.startswith('undo.'):
668 if f.startswith('undo.'):
681 return False
669 return False
682
670
683 # Otherwise assume it belongs to the simple store.
671 # Otherwise assume it belongs to the simple store.
684 return True
672 return True
685
673
686 class simplestore(store.encodedstore):
674 class simplestore(store.encodedstore):
687 def datafiles(self):
675 def datafiles(self):
688 for x in super(simplestore, self).datafiles():
676 for x in super(simplestore, self).datafiles():
689 yield x
677 yield x
690
678
691 # Supplement with non-revlog files.
679 # Supplement with non-revlog files.
692 extrafiles = self._walk('data', True, filefilter=issimplestorefile)
680 extrafiles = self._walk('data', True, filefilter=issimplestorefile)
693
681
694 for unencoded, encoded, size in extrafiles:
682 for unencoded, encoded, size in extrafiles:
695 try:
683 try:
696 unencoded = store.decodefilename(unencoded)
684 unencoded = store.decodefilename(unencoded)
697 except KeyError:
685 except KeyError:
698 unencoded = None
686 unencoded = None
699
687
700 yield unencoded, encoded, size
688 yield unencoded, encoded, size
701
689
702 def reposetup(ui, repo):
690 def reposetup(ui, repo):
703 if not repo.local():
691 if not repo.local():
704 return
692 return
705
693
706 if isinstance(repo, bundlerepo.bundlerepository):
694 if isinstance(repo, bundlerepo.bundlerepository):
707 raise error.Abort(_('cannot use simple store with bundlerepo'))
695 raise error.Abort(_('cannot use simple store with bundlerepo'))
708
696
709 class simplestorerepo(repo.__class__):
697 class simplestorerepo(repo.__class__):
710 def file(self, f):
698 def file(self, f):
711 return filestorage(self.svfs, f)
699 return filestorage(self.svfs, f)
712
700
713 repo.__class__ = simplestorerepo
701 repo.__class__ = simplestorerepo
714
702
715 def featuresetup(ui, supported):
703 def featuresetup(ui, supported):
716 supported.add(REQUIREMENT)
704 supported.add(REQUIREMENT)
717
705
718 def newreporequirements(orig, ui):
706 def newreporequirements(orig, ui):
719 """Modifies default requirements for new repos to use the simple store."""
707 """Modifies default requirements for new repos to use the simple store."""
720 requirements = orig(ui)
708 requirements = orig(ui)
721
709
722 # These requirements are only used to affect creation of the store
710 # These requirements are only used to affect creation of the store
723 # object. We have our own store. So we can remove them.
711 # object. We have our own store. So we can remove them.
724 # TODO do this once we feel like taking the test hit.
712 # TODO do this once we feel like taking the test hit.
725 #if 'fncache' in requirements:
713 #if 'fncache' in requirements:
726 # requirements.remove('fncache')
714 # requirements.remove('fncache')
727 #if 'dotencode' in requirements:
715 #if 'dotencode' in requirements:
728 # requirements.remove('dotencode')
716 # requirements.remove('dotencode')
729
717
730 requirements.add(REQUIREMENT)
718 requirements.add(REQUIREMENT)
731
719
732 return requirements
720 return requirements
733
721
734 def makestore(orig, requirements, path, vfstype):
722 def makestore(orig, requirements, path, vfstype):
735 if REQUIREMENT not in requirements:
723 if REQUIREMENT not in requirements:
736 return orig(requirements, path, vfstype)
724 return orig(requirements, path, vfstype)
737
725
738 return simplestore(path, vfstype)
726 return simplestore(path, vfstype)
739
727
740 def verifierinit(orig, self, *args, **kwargs):
728 def verifierinit(orig, self, *args, **kwargs):
741 orig(self, *args, **kwargs)
729 orig(self, *args, **kwargs)
742
730
743 # We don't care that files in the store don't align with what is
731 # We don't care that files in the store don't align with what is
744 # advertised. So suppress these warnings.
732 # advertised. So suppress these warnings.
745 self.warnorphanstorefiles = False
733 self.warnorphanstorefiles = False
746
734
747 def extsetup(ui):
735 def extsetup(ui):
748 localrepo.featuresetupfuncs.add(featuresetup)
736 localrepo.featuresetupfuncs.add(featuresetup)
749
737
750 extensions.wrapfunction(localrepo, 'newreporequirements',
738 extensions.wrapfunction(localrepo, 'newreporequirements',
751 newreporequirements)
739 newreporequirements)
752 extensions.wrapfunction(store, 'store', makestore)
740 extensions.wrapfunction(store, 'store', makestore)
753 extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
741 extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
General Comments 0
You need to be logged in to leave comments. Login now