##// END OF EJS Templates
filelog: stop proxying deltaparent() (API)...
Gregory Szorc -
r39912:a269fa55 default
parent child Browse files
Show More
@@ -1,258 +1,255 b''
1 # filelog.py - file history class for mercurial
1 # filelog.py - file history class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from . import (
10 from . import (
11 error,
11 error,
12 repository,
12 repository,
13 revlog,
13 revlog,
14 )
14 )
15 from .utils import (
15 from .utils import (
16 interfaceutil,
16 interfaceutil,
17 )
17 )
18
18
19 @interfaceutil.implementer(repository.ifilestorage)
19 @interfaceutil.implementer(repository.ifilestorage)
20 class filelog(object):
20 class filelog(object):
21 def __init__(self, opener, path):
21 def __init__(self, opener, path):
22 self._revlog = revlog.revlog(opener,
22 self._revlog = revlog.revlog(opener,
23 '/'.join(('data', path + '.i')),
23 '/'.join(('data', path + '.i')),
24 censorable=True)
24 censorable=True)
25 # Full name of the user visible file, relative to the repository root.
25 # Full name of the user visible file, relative to the repository root.
26 # Used by LFS.
26 # Used by LFS.
27 self._revlog.filename = path
27 self._revlog.filename = path
28
28
29 def __len__(self):
29 def __len__(self):
30 return len(self._revlog)
30 return len(self._revlog)
31
31
32 def __iter__(self):
32 def __iter__(self):
33 return self._revlog.__iter__()
33 return self._revlog.__iter__()
34
34
35 def revs(self, start=0, stop=None):
35 def revs(self, start=0, stop=None):
36 return self._revlog.revs(start=start, stop=stop)
36 return self._revlog.revs(start=start, stop=stop)
37
37
38 def parents(self, node):
38 def parents(self, node):
39 return self._revlog.parents(node)
39 return self._revlog.parents(node)
40
40
41 def parentrevs(self, rev):
41 def parentrevs(self, rev):
42 return self._revlog.parentrevs(rev)
42 return self._revlog.parentrevs(rev)
43
43
44 def rev(self, node):
44 def rev(self, node):
45 return self._revlog.rev(node)
45 return self._revlog.rev(node)
46
46
47 def node(self, rev):
47 def node(self, rev):
48 return self._revlog.node(rev)
48 return self._revlog.node(rev)
49
49
50 def lookup(self, node):
50 def lookup(self, node):
51 return self._revlog.lookup(node)
51 return self._revlog.lookup(node)
52
52
53 def linkrev(self, rev):
53 def linkrev(self, rev):
54 return self._revlog.linkrev(rev)
54 return self._revlog.linkrev(rev)
55
55
56 def commonancestorsheads(self, node1, node2):
56 def commonancestorsheads(self, node1, node2):
57 return self._revlog.commonancestorsheads(node1, node2)
57 return self._revlog.commonancestorsheads(node1, node2)
58
58
59 # Used by dagop.blockdescendants().
59 # Used by dagop.blockdescendants().
60 def descendants(self, revs):
60 def descendants(self, revs):
61 return self._revlog.descendants(revs)
61 return self._revlog.descendants(revs)
62
62
63 def heads(self, start=None, stop=None):
63 def heads(self, start=None, stop=None):
64 return self._revlog.heads(start, stop)
64 return self._revlog.heads(start, stop)
65
65
66 # Used by hgweb, children extension.
66 # Used by hgweb, children extension.
67 def children(self, node):
67 def children(self, node):
68 return self._revlog.children(node)
68 return self._revlog.children(node)
69
69
70 def deltaparent(self, rev):
71 return self._revlog.deltaparent(rev)
72
73 def iscensored(self, rev):
70 def iscensored(self, rev):
74 return self._revlog.iscensored(rev)
71 return self._revlog.iscensored(rev)
75
72
76 # Might be unused.
73 # Might be unused.
77 def checkhash(self, text, node, p1=None, p2=None, rev=None):
74 def checkhash(self, text, node, p1=None, p2=None, rev=None):
78 return self._revlog.checkhash(text, node, p1=p1, p2=p2, rev=rev)
75 return self._revlog.checkhash(text, node, p1=p1, p2=p2, rev=rev)
79
76
80 def revision(self, node, _df=None, raw=False):
77 def revision(self, node, _df=None, raw=False):
81 return self._revlog.revision(node, _df=_df, raw=raw)
78 return self._revlog.revision(node, _df=_df, raw=raw)
82
79
83 def revdiff(self, rev1, rev2):
80 def revdiff(self, rev1, rev2):
84 return self._revlog.revdiff(rev1, rev2)
81 return self._revlog.revdiff(rev1, rev2)
85
82
86 def emitrevisions(self, nodes, nodesorder=None,
83 def emitrevisions(self, nodes, nodesorder=None,
87 revisiondata=False, assumehaveparentrevisions=False,
84 revisiondata=False, assumehaveparentrevisions=False,
88 deltaprevious=False):
85 deltaprevious=False):
89 return self._revlog.emitrevisions(
86 return self._revlog.emitrevisions(
90 nodes, nodesorder=nodesorder, revisiondata=revisiondata,
87 nodes, nodesorder=nodesorder, revisiondata=revisiondata,
91 assumehaveparentrevisions=assumehaveparentrevisions,
88 assumehaveparentrevisions=assumehaveparentrevisions,
92 deltaprevious=deltaprevious)
89 deltaprevious=deltaprevious)
93
90
94 def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
91 def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
95 node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
92 node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
96 cachedelta=None):
93 cachedelta=None):
97 return self._revlog.addrevision(revisiondata, transaction, linkrev,
94 return self._revlog.addrevision(revisiondata, transaction, linkrev,
98 p1, p2, node=node, flags=flags,
95 p1, p2, node=node, flags=flags,
99 cachedelta=cachedelta)
96 cachedelta=cachedelta)
100
97
101 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
98 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
102 return self._revlog.addgroup(deltas, linkmapper, transaction,
99 return self._revlog.addgroup(deltas, linkmapper, transaction,
103 addrevisioncb=addrevisioncb)
100 addrevisioncb=addrevisioncb)
104
101
105 def getstrippoint(self, minlink):
102 def getstrippoint(self, minlink):
106 return self._revlog.getstrippoint(minlink)
103 return self._revlog.getstrippoint(minlink)
107
104
108 def strip(self, minlink, transaction):
105 def strip(self, minlink, transaction):
109 return self._revlog.strip(minlink, transaction)
106 return self._revlog.strip(minlink, transaction)
110
107
111 def censorrevision(self, tr, node, tombstone=b''):
108 def censorrevision(self, tr, node, tombstone=b''):
112 return self._revlog.censorrevision(node, tombstone=tombstone)
109 return self._revlog.censorrevision(node, tombstone=tombstone)
113
110
114 def files(self):
111 def files(self):
115 return self._revlog.files()
112 return self._revlog.files()
116
113
117 def read(self, node):
114 def read(self, node):
118 t = self.revision(node)
115 t = self.revision(node)
119 if not t.startswith('\1\n'):
116 if not t.startswith('\1\n'):
120 return t
117 return t
121 s = t.index('\1\n', 2)
118 s = t.index('\1\n', 2)
122 return t[s + 2:]
119 return t[s + 2:]
123
120
124 def add(self, text, meta, transaction, link, p1=None, p2=None):
121 def add(self, text, meta, transaction, link, p1=None, p2=None):
125 if meta or text.startswith('\1\n'):
122 if meta or text.startswith('\1\n'):
126 text = revlog.packmeta(meta, text)
123 text = revlog.packmeta(meta, text)
127 return self.addrevision(text, transaction, link, p1, p2)
124 return self.addrevision(text, transaction, link, p1, p2)
128
125
129 def renamed(self, node):
126 def renamed(self, node):
130 if self.parents(node)[0] != revlog.nullid:
127 if self.parents(node)[0] != revlog.nullid:
131 return False
128 return False
132 t = self.revision(node)
129 t = self.revision(node)
133 m = revlog.parsemeta(t)[0]
130 m = revlog.parsemeta(t)[0]
134 # copy and copyrev occur in pairs. In rare cases due to bugs,
131 # copy and copyrev occur in pairs. In rare cases due to bugs,
135 # one can occur without the other.
132 # one can occur without the other.
136 if m and "copy" in m and "copyrev" in m:
133 if m and "copy" in m and "copyrev" in m:
137 return (m["copy"], revlog.bin(m["copyrev"]))
134 return (m["copy"], revlog.bin(m["copyrev"]))
138 return False
135 return False
139
136
140 def size(self, rev):
137 def size(self, rev):
141 """return the size of a given revision"""
138 """return the size of a given revision"""
142
139
143 # for revisions with renames, we have to go the slow way
140 # for revisions with renames, we have to go the slow way
144 node = self.node(rev)
141 node = self.node(rev)
145 if self.renamed(node):
142 if self.renamed(node):
146 return len(self.read(node))
143 return len(self.read(node))
147 if self.iscensored(rev):
144 if self.iscensored(rev):
148 return 0
145 return 0
149
146
150 # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
147 # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
151 return self._revlog.size(rev)
148 return self._revlog.size(rev)
152
149
153 def cmp(self, node, text):
150 def cmp(self, node, text):
154 """compare text with a given file revision
151 """compare text with a given file revision
155
152
156 returns True if text is different than what is stored.
153 returns True if text is different than what is stored.
157 """
154 """
158
155
159 t = text
156 t = text
160 if text.startswith('\1\n'):
157 if text.startswith('\1\n'):
161 t = '\1\n\1\n' + text
158 t = '\1\n\1\n' + text
162
159
163 samehashes = not self._revlog.cmp(node, t)
160 samehashes = not self._revlog.cmp(node, t)
164 if samehashes:
161 if samehashes:
165 return False
162 return False
166
163
167 # censored files compare against the empty file
164 # censored files compare against the empty file
168 if self.iscensored(self.rev(node)):
165 if self.iscensored(self.rev(node)):
169 return text != ''
166 return text != ''
170
167
171 # renaming a file produces a different hash, even if the data
168 # renaming a file produces a different hash, even if the data
172 # remains unchanged. Check if it's the case (slow):
169 # remains unchanged. Check if it's the case (slow):
173 if self.renamed(node):
170 if self.renamed(node):
174 t2 = self.read(node)
171 t2 = self.read(node)
175 return t2 != text
172 return t2 != text
176
173
177 return True
174 return True
178
175
179 def verifyintegrity(self, state):
176 def verifyintegrity(self, state):
180 return self._revlog.verifyintegrity(state)
177 return self._revlog.verifyintegrity(state)
181
178
182 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
179 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
183 revisionscount=False, trackedsize=False,
180 revisionscount=False, trackedsize=False,
184 storedsize=False):
181 storedsize=False):
185 return self._revlog.storageinfo(
182 return self._revlog.storageinfo(
186 exclusivefiles=exclusivefiles, sharedfiles=sharedfiles,
183 exclusivefiles=exclusivefiles, sharedfiles=sharedfiles,
187 revisionscount=revisionscount, trackedsize=trackedsize,
184 revisionscount=revisionscount, trackedsize=trackedsize,
188 storedsize=storedsize)
185 storedsize=storedsize)
189
186
190 # TODO these aren't part of the interface and aren't internal methods.
187 # TODO these aren't part of the interface and aren't internal methods.
191 # Callers should be fixed to not use them.
188 # Callers should be fixed to not use them.
192
189
193 # Used by bundlefilelog, unionfilelog.
190 # Used by bundlefilelog, unionfilelog.
194 @property
191 @property
195 def indexfile(self):
192 def indexfile(self):
196 return self._revlog.indexfile
193 return self._revlog.indexfile
197
194
198 @indexfile.setter
195 @indexfile.setter
199 def indexfile(self, value):
196 def indexfile(self, value):
200 self._revlog.indexfile = value
197 self._revlog.indexfile = value
201
198
202 # Used by repo upgrade.
199 # Used by repo upgrade.
203 def clone(self, tr, destrevlog, **kwargs):
200 def clone(self, tr, destrevlog, **kwargs):
204 if not isinstance(destrevlog, filelog):
201 if not isinstance(destrevlog, filelog):
205 raise error.ProgrammingError('expected filelog to clone()')
202 raise error.ProgrammingError('expected filelog to clone()')
206
203
207 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
204 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
208
205
209 class narrowfilelog(filelog):
206 class narrowfilelog(filelog):
210 """Filelog variation to be used with narrow stores."""
207 """Filelog variation to be used with narrow stores."""
211
208
212 def __init__(self, opener, path, narrowmatch):
209 def __init__(self, opener, path, narrowmatch):
213 super(narrowfilelog, self).__init__(opener, path)
210 super(narrowfilelog, self).__init__(opener, path)
214 self._narrowmatch = narrowmatch
211 self._narrowmatch = narrowmatch
215
212
216 def renamed(self, node):
213 def renamed(self, node):
217 res = super(narrowfilelog, self).renamed(node)
214 res = super(narrowfilelog, self).renamed(node)
218
215
219 # Renames that come from outside the narrowspec are problematic
216 # Renames that come from outside the narrowspec are problematic
220 # because we may lack the base text for the rename. This can result
217 # because we may lack the base text for the rename. This can result
221 # in code attempting to walk the ancestry or compute a diff
218 # in code attempting to walk the ancestry or compute a diff
222 # encountering a missing revision. We address this by silently
219 # encountering a missing revision. We address this by silently
223 # removing rename metadata if the source file is outside the
220 # removing rename metadata if the source file is outside the
224 # narrow spec.
221 # narrow spec.
225 #
222 #
226 # A better solution would be to see if the base revision is available,
223 # A better solution would be to see if the base revision is available,
227 # rather than assuming it isn't.
224 # rather than assuming it isn't.
228 #
225 #
229 # An even better solution would be to teach all consumers of rename
226 # An even better solution would be to teach all consumers of rename
230 # metadata that the base revision may not be available.
227 # metadata that the base revision may not be available.
231 #
228 #
232 # TODO consider better ways of doing this.
229 # TODO consider better ways of doing this.
233 if res and not self._narrowmatch(res[0]):
230 if res and not self._narrowmatch(res[0]):
234 return None
231 return None
235
232
236 return res
233 return res
237
234
238 def size(self, rev):
235 def size(self, rev):
239 # Because we have a custom renamed() that may lie, we need to call
236 # Because we have a custom renamed() that may lie, we need to call
240 # the base renamed() to report accurate results.
237 # the base renamed() to report accurate results.
241 node = self.node(rev)
238 node = self.node(rev)
242 if super(narrowfilelog, self).renamed(node):
239 if super(narrowfilelog, self).renamed(node):
243 return len(self.read(node))
240 return len(self.read(node))
244 else:
241 else:
245 return super(narrowfilelog, self).size(rev)
242 return super(narrowfilelog, self).size(rev)
246
243
247 def cmp(self, node, text):
244 def cmp(self, node, text):
248 different = super(narrowfilelog, self).cmp(node, text)
245 different = super(narrowfilelog, self).cmp(node, text)
249
246
250 # Because renamed() may lie, we may get false positives for
247 # Because renamed() may lie, we may get false positives for
251 # different content. Check for this by comparing against the original
248 # different content. Check for this by comparing against the original
252 # renamed() implementation.
249 # renamed() implementation.
253 if different:
250 if different:
254 if super(narrowfilelog, self).renamed(node):
251 if super(narrowfilelog, self).renamed(node):
255 t2 = self.read(node)
252 t2 = self.read(node)
256 return t2 != text
253 return t2 != text
257
254
258 return different
255 return different
@@ -1,1680 +1,1677 b''
1 # repository.py - Interfaces and base classes for repositories and peers.
1 # repository.py - Interfaces and base classes for repositories and peers.
2 #
2 #
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from . import (
11 from . import (
12 error,
12 error,
13 )
13 )
14 from .utils import (
14 from .utils import (
15 interfaceutil,
15 interfaceutil,
16 )
16 )
17
17
18 # When narrowing is finalized and no longer subject to format changes,
18 # When narrowing is finalized and no longer subject to format changes,
19 # we should move this to just "narrow" or similar.
19 # we should move this to just "narrow" or similar.
20 NARROW_REQUIREMENT = 'narrowhg-experimental'
20 NARROW_REQUIREMENT = 'narrowhg-experimental'
21
21
22 # Local repository feature string.
22 # Local repository feature string.
23
23
24 # Revlogs are being used for file storage.
24 # Revlogs are being used for file storage.
25 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
25 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
26 # The storage part of the repository is shared from an external source.
26 # The storage part of the repository is shared from an external source.
27 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
27 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
28 # LFS supported for backing file storage.
28 # LFS supported for backing file storage.
29 REPO_FEATURE_LFS = b'lfs'
29 REPO_FEATURE_LFS = b'lfs'
30
30
31 class ipeerconnection(interfaceutil.Interface):
31 class ipeerconnection(interfaceutil.Interface):
32 """Represents a "connection" to a repository.
32 """Represents a "connection" to a repository.
33
33
34 This is the base interface for representing a connection to a repository.
34 This is the base interface for representing a connection to a repository.
35 It holds basic properties and methods applicable to all peer types.
35 It holds basic properties and methods applicable to all peer types.
36
36
37 This is not a complete interface definition and should not be used
37 This is not a complete interface definition and should not be used
38 outside of this module.
38 outside of this module.
39 """
39 """
40 ui = interfaceutil.Attribute("""ui.ui instance""")
40 ui = interfaceutil.Attribute("""ui.ui instance""")
41
41
42 def url():
42 def url():
43 """Returns a URL string representing this peer.
43 """Returns a URL string representing this peer.
44
44
45 Currently, implementations expose the raw URL used to construct the
45 Currently, implementations expose the raw URL used to construct the
46 instance. It may contain credentials as part of the URL. The
46 instance. It may contain credentials as part of the URL. The
47 expectations of the value aren't well-defined and this could lead to
47 expectations of the value aren't well-defined and this could lead to
48 data leakage.
48 data leakage.
49
49
50 TODO audit/clean consumers and more clearly define the contents of this
50 TODO audit/clean consumers and more clearly define the contents of this
51 value.
51 value.
52 """
52 """
53
53
54 def local():
54 def local():
55 """Returns a local repository instance.
55 """Returns a local repository instance.
56
56
57 If the peer represents a local repository, returns an object that
57 If the peer represents a local repository, returns an object that
58 can be used to interface with it. Otherwise returns ``None``.
58 can be used to interface with it. Otherwise returns ``None``.
59 """
59 """
60
60
61 def peer():
61 def peer():
62 """Returns an object conforming to this interface.
62 """Returns an object conforming to this interface.
63
63
64 Most implementations will ``return self``.
64 Most implementations will ``return self``.
65 """
65 """
66
66
67 def canpush():
67 def canpush():
68 """Returns a boolean indicating if this peer can be pushed to."""
68 """Returns a boolean indicating if this peer can be pushed to."""
69
69
70 def close():
70 def close():
71 """Close the connection to this peer.
71 """Close the connection to this peer.
72
72
73 This is called when the peer will no longer be used. Resources
73 This is called when the peer will no longer be used. Resources
74 associated with the peer should be cleaned up.
74 associated with the peer should be cleaned up.
75 """
75 """
76
76
77 class ipeercapabilities(interfaceutil.Interface):
77 class ipeercapabilities(interfaceutil.Interface):
78 """Peer sub-interface related to capabilities."""
78 """Peer sub-interface related to capabilities."""
79
79
80 def capable(name):
80 def capable(name):
81 """Determine support for a named capability.
81 """Determine support for a named capability.
82
82
83 Returns ``False`` if capability not supported.
83 Returns ``False`` if capability not supported.
84
84
85 Returns ``True`` if boolean capability is supported. Returns a string
85 Returns ``True`` if boolean capability is supported. Returns a string
86 if capability support is non-boolean.
86 if capability support is non-boolean.
87
87
88 Capability strings may or may not map to wire protocol capabilities.
88 Capability strings may or may not map to wire protocol capabilities.
89 """
89 """
90
90
91 def requirecap(name, purpose):
91 def requirecap(name, purpose):
92 """Require a capability to be present.
92 """Require a capability to be present.
93
93
94 Raises a ``CapabilityError`` if the capability isn't present.
94 Raises a ``CapabilityError`` if the capability isn't present.
95 """
95 """
96
96
97 class ipeercommands(interfaceutil.Interface):
97 class ipeercommands(interfaceutil.Interface):
98 """Client-side interface for communicating over the wire protocol.
98 """Client-side interface for communicating over the wire protocol.
99
99
100 This interface is used as a gateway to the Mercurial wire protocol.
100 This interface is used as a gateway to the Mercurial wire protocol.
101 methods commonly call wire protocol commands of the same name.
101 methods commonly call wire protocol commands of the same name.
102 """
102 """
103
103
104 def branchmap():
104 def branchmap():
105 """Obtain heads in named branches.
105 """Obtain heads in named branches.
106
106
107 Returns a dict mapping branch name to an iterable of nodes that are
107 Returns a dict mapping branch name to an iterable of nodes that are
108 heads on that branch.
108 heads on that branch.
109 """
109 """
110
110
111 def capabilities():
111 def capabilities():
112 """Obtain capabilities of the peer.
112 """Obtain capabilities of the peer.
113
113
114 Returns a set of string capabilities.
114 Returns a set of string capabilities.
115 """
115 """
116
116
117 def clonebundles():
117 def clonebundles():
118 """Obtains the clone bundles manifest for the repo.
118 """Obtains the clone bundles manifest for the repo.
119
119
120 Returns the manifest as unparsed bytes.
120 Returns the manifest as unparsed bytes.
121 """
121 """
122
122
123 def debugwireargs(one, two, three=None, four=None, five=None):
123 def debugwireargs(one, two, three=None, four=None, five=None):
124 """Used to facilitate debugging of arguments passed over the wire."""
124 """Used to facilitate debugging of arguments passed over the wire."""
125
125
126 def getbundle(source, **kwargs):
126 def getbundle(source, **kwargs):
127 """Obtain remote repository data as a bundle.
127 """Obtain remote repository data as a bundle.
128
128
129 This command is how the bulk of repository data is transferred from
129 This command is how the bulk of repository data is transferred from
130 the peer to the local repository
130 the peer to the local repository
131
131
132 Returns a generator of bundle data.
132 Returns a generator of bundle data.
133 """
133 """
134
134
135 def heads():
135 def heads():
136 """Determine all known head revisions in the peer.
136 """Determine all known head revisions in the peer.
137
137
138 Returns an iterable of binary nodes.
138 Returns an iterable of binary nodes.
139 """
139 """
140
140
141 def known(nodes):
141 def known(nodes):
142 """Determine whether multiple nodes are known.
142 """Determine whether multiple nodes are known.
143
143
144 Accepts an iterable of nodes whose presence to check for.
144 Accepts an iterable of nodes whose presence to check for.
145
145
146 Returns an iterable of booleans indicating of the corresponding node
146 Returns an iterable of booleans indicating of the corresponding node
147 at that index is known to the peer.
147 at that index is known to the peer.
148 """
148 """
149
149
150 def listkeys(namespace):
150 def listkeys(namespace):
151 """Obtain all keys in a pushkey namespace.
151 """Obtain all keys in a pushkey namespace.
152
152
153 Returns an iterable of key names.
153 Returns an iterable of key names.
154 """
154 """
155
155
156 def lookup(key):
156 def lookup(key):
157 """Resolve a value to a known revision.
157 """Resolve a value to a known revision.
158
158
159 Returns a binary node of the resolved revision on success.
159 Returns a binary node of the resolved revision on success.
160 """
160 """
161
161
162 def pushkey(namespace, key, old, new):
162 def pushkey(namespace, key, old, new):
163 """Set a value using the ``pushkey`` protocol.
163 """Set a value using the ``pushkey`` protocol.
164
164
165 Arguments correspond to the pushkey namespace and key to operate on and
165 Arguments correspond to the pushkey namespace and key to operate on and
166 the old and new values for that key.
166 the old and new values for that key.
167
167
168 Returns a string with the peer result. The value inside varies by the
168 Returns a string with the peer result. The value inside varies by the
169 namespace.
169 namespace.
170 """
170 """
171
171
172 def stream_out():
172 def stream_out():
173 """Obtain streaming clone data.
173 """Obtain streaming clone data.
174
174
175 Successful result should be a generator of data chunks.
175 Successful result should be a generator of data chunks.
176 """
176 """
177
177
178 def unbundle(bundle, heads, url):
178 def unbundle(bundle, heads, url):
179 """Transfer repository data to the peer.
179 """Transfer repository data to the peer.
180
180
181 This is how the bulk of data during a push is transferred.
181 This is how the bulk of data during a push is transferred.
182
182
183 Returns the integer number of heads added to the peer.
183 Returns the integer number of heads added to the peer.
184 """
184 """
185
185
186 class ipeerlegacycommands(interfaceutil.Interface):
186 class ipeerlegacycommands(interfaceutil.Interface):
187 """Interface for implementing support for legacy wire protocol commands.
187 """Interface for implementing support for legacy wire protocol commands.
188
188
189 Wire protocol commands transition to legacy status when they are no longer
189 Wire protocol commands transition to legacy status when they are no longer
190 used by modern clients. To facilitate identifying which commands are
190 used by modern clients. To facilitate identifying which commands are
191 legacy, the interfaces are split.
191 legacy, the interfaces are split.
192 """
192 """
193
193
194 def between(pairs):
194 def between(pairs):
195 """Obtain nodes between pairs of nodes.
195 """Obtain nodes between pairs of nodes.
196
196
197 ``pairs`` is an iterable of node pairs.
197 ``pairs`` is an iterable of node pairs.
198
198
199 Returns an iterable of iterables of nodes corresponding to each
199 Returns an iterable of iterables of nodes corresponding to each
200 requested pair.
200 requested pair.
201 """
201 """
202
202
203 def branches(nodes):
203 def branches(nodes):
204 """Obtain ancestor changesets of specific nodes back to a branch point.
204 """Obtain ancestor changesets of specific nodes back to a branch point.
205
205
206 For each requested node, the peer finds the first ancestor node that is
206 For each requested node, the peer finds the first ancestor node that is
207 a DAG root or is a merge.
207 a DAG root or is a merge.
208
208
209 Returns an iterable of iterables with the resolved values for each node.
209 Returns an iterable of iterables with the resolved values for each node.
210 """
210 """
211
211
212 def changegroup(nodes, source):
212 def changegroup(nodes, source):
213 """Obtain a changegroup with data for descendants of specified nodes."""
213 """Obtain a changegroup with data for descendants of specified nodes."""
214
214
215 def changegroupsubset(bases, heads, source):
215 def changegroupsubset(bases, heads, source):
216 pass
216 pass
217
217
218 class ipeercommandexecutor(interfaceutil.Interface):
218 class ipeercommandexecutor(interfaceutil.Interface):
219 """Represents a mechanism to execute remote commands.
219 """Represents a mechanism to execute remote commands.
220
220
221 This is the primary interface for requesting that wire protocol commands
221 This is the primary interface for requesting that wire protocol commands
222 be executed. Instances of this interface are active in a context manager
222 be executed. Instances of this interface are active in a context manager
223 and have a well-defined lifetime. When the context manager exits, all
223 and have a well-defined lifetime. When the context manager exits, all
224 outstanding requests are waited on.
224 outstanding requests are waited on.
225 """
225 """
226
226
227 def callcommand(name, args):
227 def callcommand(name, args):
228 """Request that a named command be executed.
228 """Request that a named command be executed.
229
229
230 Receives the command name and a dictionary of command arguments.
230 Receives the command name and a dictionary of command arguments.
231
231
232 Returns a ``concurrent.futures.Future`` that will resolve to the
232 Returns a ``concurrent.futures.Future`` that will resolve to the
233 result of that command request. That exact value is left up to
233 result of that command request. That exact value is left up to
234 the implementation and possibly varies by command.
234 the implementation and possibly varies by command.
235
235
236 Not all commands can coexist with other commands in an executor
236 Not all commands can coexist with other commands in an executor
237 instance: it depends on the underlying wire protocol transport being
237 instance: it depends on the underlying wire protocol transport being
238 used and the command itself.
238 used and the command itself.
239
239
240 Implementations MAY call ``sendcommands()`` automatically if the
240 Implementations MAY call ``sendcommands()`` automatically if the
241 requested command can not coexist with other commands in this executor.
241 requested command can not coexist with other commands in this executor.
242
242
243 Implementations MAY call ``sendcommands()`` automatically when the
243 Implementations MAY call ``sendcommands()`` automatically when the
244 future's ``result()`` is called. So, consumers using multiple
244 future's ``result()`` is called. So, consumers using multiple
245 commands with an executor MUST ensure that ``result()`` is not called
245 commands with an executor MUST ensure that ``result()`` is not called
246 until all command requests have been issued.
246 until all command requests have been issued.
247 """
247 """
248
248
249 def sendcommands():
249 def sendcommands():
250 """Trigger submission of queued command requests.
250 """Trigger submission of queued command requests.
251
251
252 Not all transports submit commands as soon as they are requested to
252 Not all transports submit commands as soon as they are requested to
253 run. When called, this method forces queued command requests to be
253 run. When called, this method forces queued command requests to be
254 issued. It will no-op if all commands have already been sent.
254 issued. It will no-op if all commands have already been sent.
255
255
256 When called, no more new commands may be issued with this executor.
256 When called, no more new commands may be issued with this executor.
257 """
257 """
258
258
259 def close():
259 def close():
260 """Signal that this command request is finished.
260 """Signal that this command request is finished.
261
261
262 When called, no more new commands may be issued. All outstanding
262 When called, no more new commands may be issued. All outstanding
263 commands that have previously been issued are waited on before
263 commands that have previously been issued are waited on before
264 returning. This not only includes waiting for the futures to resolve,
264 returning. This not only includes waiting for the futures to resolve,
265 but also waiting for all response data to arrive. In other words,
265 but also waiting for all response data to arrive. In other words,
266 calling this waits for all on-wire state for issued command requests
266 calling this waits for all on-wire state for issued command requests
267 to finish.
267 to finish.
268
268
269 When used as a context manager, this method is called when exiting the
269 When used as a context manager, this method is called when exiting the
270 context manager.
270 context manager.
271
271
272 This method may call ``sendcommands()`` if there are buffered commands.
272 This method may call ``sendcommands()`` if there are buffered commands.
273 """
273 """
274
274
class ipeerrequests(interfaceutil.Interface):
    """Interface for executing commands on a peer."""

    def commandexecutor():
        """A context manager that resolves to an ipeercommandexecutor.

        The resolved object is used to send command requests to the peer.

        Callers invoke its ``callcommand`` method to issue each command
        request.

        Consumers should obtain a fresh executor for every distinct set of
        commands (possibly just a single command) that constitutes one
        operation or round trip. This is necessary because some peers are
        half-duplex and/or lack persistent connections. With HTTP peers,
        for instance, the commands handed to one executor map onto a
        single HTTP request. Although certain peers can perform multiple
        wire-level command sends per executor, consumers must code to the
        least capable peer: assume that executors buffer the requested
        commands until explicitly told to send them, and that each
        executor may result in a new connection or wire-level request
        being issued.
        """
299
299
class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified interface for peer repositories.

    Every peer instance must conform to this interface.
    """
305
305
@interfaceutil.implementer(ipeerbase)
class peer(object):
    """Base class for peer repositories."""

    def capable(self, name):
        """Determine support for a named capability.

        Returns ``False`` when the capability is not advertised, ``True``
        when it is advertised without a value, or the string payload
        following ``name=`` when the capability carries a value.
        """
        caps = self.capabilities()

        # A bare entry means the capability is supported with no value.
        if name in caps:
            return True

        # A "name=value" entry carries a payload; hand back the payload.
        prefix = '%s=' % name
        for cap in caps:
            if cap.startswith(prefix):
                return cap[len(prefix):]

        return False

    def requirecap(self, name, purpose):
        """Raise ``error.CapabilityError`` unless ``name`` is supported.

        ``purpose`` is interpolated into the error message to describe
        what operation required the capability.
        """
        if not self.capable(name):
            raise error.CapabilityError(
                _('cannot %s; remote repository does not support the %r '
                  'capability') % (purpose, name))
329
329
class iverifyproblem(interfaceutil.Interface):
    """Represents a problem with the integrity of the repository.

    Objects providing this interface are emitted to describe integrity
    issues in a repository (corrupt storage, missing data, and the like).

    They are essentially messages tagged with a severity.
    """
    warning = interfaceutil.Attribute(
        """Message indicating a non-fatal problem.""")

    error = interfaceutil.Attribute(
        """Message indicating a fatal problem.""")

    node = interfaceutil.Attribute(
        """Revision encountering the problem.

        ``None`` means the problem doesn't apply to a single revision.
        """)
349
349
class irevisiondelta(interfaceutil.Interface):
    """Represents a delta between one revision and another.

    Instances carry enough information for a revision to be exchanged
    with another repository.

    An instance holds either the fulltext revision data or a delta
    against some other revision; the ``revision`` and ``delta``
    attributes are therefore mutually exclusive.

    Typically used for changegroup generation.
    """

    node = interfaceutil.Attribute(
        """20 byte node of this revision.""")

    p1node = interfaceutil.Attribute(
        """20 byte node of 1st parent of this revision.""")

    p2node = interfaceutil.Attribute(
        """20 byte node of 2nd parent of this revision.""")

    linknode = interfaceutil.Attribute(
        """20 byte node of the changelog revision this node is linked to.""")

    flags = interfaceutil.Attribute(
        """2 bytes of integer flags that apply to this revision.""")

    basenode = interfaceutil.Attribute(
        """20 byte node of the revision this data is a delta against.

        ``nullid`` indicates that the revision is a full revision and not
        a delta.
        """)

    baserevisionsize = interfaceutil.Attribute(
        """Size of base revision this delta is against.

        May be ``None`` if ``basenode`` is ``nullid``.
        """)

    revision = interfaceutil.Attribute(
        """Raw fulltext of revision data for this node.""")

    delta = interfaceutil.Attribute(
        """Delta between ``basenode`` and ``node``.

        Stored in the bdiff delta format.
        """)
399
399
class ifilerevisionssequence(interfaceutil.Interface):
    """Contains index data for all revisions of a file.

    Implementations behave like lists of tuples: the position in the
    list corresponds to the revision number, and each value holds index
    metadata.

    The *null* revision (revision number -1) is always the final item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
            Contains the offset and flags for the revision. 64-bit unsigned
            integer where first 6 bytes are the offset and the next 2 bytes
            are flags. The offset can be 0 if it is not used by the store.
        compressed size
            Size of the revision data in the store. It can be 0 if it isn't
            needed by the store.
        uncompressed size
            Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
            Revision number of revision the delta for storage is encoded
            against. -1 indicates not encoded against a base revision.
        link revision
            Revision number of changelog revision this entry is related to.
        p1 revision
            Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
            Revision number of 2nd parent. -1 if no 1st parent.
        node
            Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(rev):
        """Whether a revision number exists."""

    def insert(self, i, entry):
        """Add an item to the index at specific revision."""
450
450
class ifileindex(interfaceutil.Interface):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage;
    this interface defines the index portion.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """
    def __len__():
        """Obtain the number of revisions stored for this file."""

    def __iter__():
        """Iterate over revision numbers for this file."""

    def revs(start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def rev(node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    def lookup(node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def iscensored(rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def heads(start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """
class ifiledata(interfaceutil.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """
    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements.
        """

    def checkhash(fulltext, node, p1=None, p2=None, rev=None):
        """Validate the stored hash of a given fulltext and node.

        Raises ``error.StorageError`` if hash validation fails.
        """

    def revision(node, raw=False):
        """Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        Operates on raw data in the store (``revision(node, raw=True)``).

        The returned data is the result of ``bdiff.bdiff`` on the raw
        revision data.
        """

    def emitrevisions(nodes,
                      nodesorder=None,
                      revisiondata=False,
                      assumehaveparentrevisions=False,
                      deltaprevious=False):
        """Produce ``irevisiondelta`` for revisions.

        Given an iterable of nodes, emits objects conforming to the
        ``irevisiondelta`` interface that describe revisions in storage.

        This method is a generator.

        The input nodes may be unordered. Implementations must ensure that
        a node's parents are emitted before the node itself. Transitively,
        this means that a node may only be emitted once all its ancestors
        in ``nodes`` have also been emitted.

        By default, emits "index" data (the ``node``, ``p1node``, and
        ``p2node`` attributes). If ``revisiondata`` is set, revision data
        will also be present on the emitted objects.

        With default argument values, implementations can choose to emit
        either fulltext revision data or a delta. When emitting deltas,
        implementations must consider whether the delta's base revision
        fulltext is available to the receiver.

        The base revision fulltext is guaranteed to be available if any of
        the following are met:

        * Its fulltext revision was emitted by this method call.
        * A delta for that revision was emitted by this method call.
        * ``assumehaveparentrevisions`` is True and the base revision is a
          parent of the node.

        ``nodesorder`` can be used to control the order that revisions are
        emitted. By default, revisions can be reordered as long as they are
        in DAG topological order (see above). If the value is ``nodes``,
        the iteration order from ``nodes`` should be used. If the value is
        ``storage``, then the native order from the backing storage layer
        is used. (Not all storage layers will have strong ordering and
        behavior of this mode is storage-dependent.) ``nodes`` ordering can
        force revisions to be emitted before their ancestors, so consumers
        should use it with care.

        The ``linknode`` attribute on the returned ``irevisiondelta`` may
        not be set and it is the caller's responsibility to resolve it, if
        needed.

        If ``deltaprevious`` is True and revision data is requested, all
        revision data should be emitted as deltas against the revision
        emitted just prior. The initial revision should be a delta against
        its 1st parent.
        """
653
650
class ifilemutation(interfaceutil.Interface):
    """Storage interface for mutation events of a tracked file."""

    def add(filedata, meta, transaction, linkrev, p1, p2):
        """Add a new revision to the store.

        Takes file data, a dictionary of metadata, a transaction, the
        linkrev, and the parent nodes.

        Returns the node that was added.

        May no-op if a revision matching the supplied data is already
        stored.
        """

    def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
                    flags=0, cachedelta=None):
        """Add a new revision to the store.

        Similar to ``add()`` but operating at a lower level: the data
        passed in already contains a metadata header, if any.

        ``node`` and ``flags`` can be used to define the expected node and
        the flags to use with storage.

        ``add()`` is usually called when adding files from e.g. the
        working directory; ``addrevision()`` is often called by ``add()``
        and in scenarios where revision data has already been computed,
        such as when applying raw data from a peer repo.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining
        revisions to add.

        The ``delta`` field contains ``mpatch`` data applied against the
        base revision identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``addrevisioncb`` should be called for each node as it is
        committed.

        Returns a list of nodes that were processed. A node appears in the
        list even if it already existed in the store.
        """

    def censorrevision(tr, node, tombstone=b''):
        """Remove the content of a single revision.

        The content of ``node`` is purged from storage; future attempts to
        access the revision data for this node will result in failure.

        A ``tombstone`` message can optionally be stored. It may be shown
        to users who attempt to access the missing revision data.

        Storage backends may hold deltas computed against the previous
        content of this revision. As part of censoring, such backends are
        expected to rewrite any internally stored deltas so that they no
        longer reference the deleted content.
        """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a
        linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revision numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        Uses ``getstrippoint()`` to determine the first node to remove,
        then effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """
739
736
740 class ifilestorage(ifileindex, ifiledata, ifilemutation):
737 class ifilestorage(ifileindex, ifiledata, ifilemutation):
741 """Complete storage interface for a single tracked file."""
738 """Complete storage interface for a single tracked file."""
742
739
743 def files():
740 def files():
744 """Obtain paths that are backing storage for this file.
741 """Obtain paths that are backing storage for this file.
745
742
746 TODO this is used heavily by verify code and there should probably
743 TODO this is used heavily by verify code and there should probably
747 be a better API for that.
744 be a better API for that.
748 """
745 """
749
746
750 def storageinfo(exclusivefiles=False, sharedfiles=False,
747 def storageinfo(exclusivefiles=False, sharedfiles=False,
751 revisionscount=False, trackedsize=False,
748 revisionscount=False, trackedsize=False,
752 storedsize=False):
749 storedsize=False):
753 """Obtain information about storage for this file's data.
750 """Obtain information about storage for this file's data.
754
751
755 Returns a dict describing storage for this tracked path. The keys
752 Returns a dict describing storage for this tracked path. The keys
756 in the dict map to arguments of the same. The arguments are bools
753 in the dict map to arguments of the same. The arguments are bools
757 indicating whether to calculate and obtain that data.
754 indicating whether to calculate and obtain that data.
758
755
759 exclusivefiles
756 exclusivefiles
760 Iterable of (vfs, path) describing files that are exclusively
757 Iterable of (vfs, path) describing files that are exclusively
761 used to back storage for this tracked path.
758 used to back storage for this tracked path.
762
759
763 sharedfiles
760 sharedfiles
764 Iterable of (vfs, path) describing files that are used to back
761 Iterable of (vfs, path) describing files that are used to back
765 storage for this tracked path. Those files may also provide storage
762 storage for this tracked path. Those files may also provide storage
766 for other stored entities.
763 for other stored entities.
767
764
768 revisionscount
765 revisionscount
769 Number of revisions available for retrieval.
766 Number of revisions available for retrieval.
770
767
771 trackedsize
768 trackedsize
772 Total size in bytes of all tracked revisions. This is a sum of the
769 Total size in bytes of all tracked revisions. This is a sum of the
773 length of the fulltext of all revisions.
770 length of the fulltext of all revisions.
774
771
775 storedsize
772 storedsize
776 Total size in bytes used to store data for all tracked revisions.
773 Total size in bytes used to store data for all tracked revisions.
777 This is commonly less than ``trackedsize`` due to internal usage
774 This is commonly less than ``trackedsize`` due to internal usage
778 of deltas rather than fulltext revisions.
775 of deltas rather than fulltext revisions.
779
776
780 Not all storage backends may support all queries are have a reasonable
777 Not all storage backends may support all queries are have a reasonable
781 value to use. In that case, the value should be set to ``None`` and
778 value to use. In that case, the value should be set to ``None`` and
782 callers are expected to handle this special value.
779 callers are expected to handle this special value.
783 """
780 """
784
781
785 def verifyintegrity(state):
782 def verifyintegrity(state):
786 """Verifies the integrity of file storage.
783 """Verifies the integrity of file storage.
787
784
788 ``state`` is a dict holding state of the verifier process. It can be
785 ``state`` is a dict holding state of the verifier process. It can be
789 used to communicate data between invocations of multiple storage
786 used to communicate data between invocations of multiple storage
790 primitives.
787 primitives.
791
788
792 If individual revisions cannot have their revision content resolved,
789 If individual revisions cannot have their revision content resolved,
793 the method is expected to set the ``skipread`` key to a set of nodes
790 the method is expected to set the ``skipread`` key to a set of nodes
794 that encountered problems.
791 that encountered problems.
795
792
796 The method yields objects conforming to the ``iverifyproblem``
793 The method yields objects conforming to the ``iverifyproblem``
797 interface.
794 interface.
798 """
795 """
799
796
800 class idirs(interfaceutil.Interface):
797 class idirs(interfaceutil.Interface):
801 """Interface representing a collection of directories from paths.
798 """Interface representing a collection of directories from paths.
802
799
803 This interface is essentially a derived data structure representing
800 This interface is essentially a derived data structure representing
804 directories from a collection of paths.
801 directories from a collection of paths.
805 """
802 """
806
803
807 def addpath(path):
804 def addpath(path):
808 """Add a path to the collection.
805 """Add a path to the collection.
809
806
810 All directories in the path will be added to the collection.
807 All directories in the path will be added to the collection.
811 """
808 """
812
809
813 def delpath(path):
810 def delpath(path):
814 """Remove a path from the collection.
811 """Remove a path from the collection.
815
812
816 If the removal was the last path in a particular directory, the
813 If the removal was the last path in a particular directory, the
817 directory is removed from the collection.
814 directory is removed from the collection.
818 """
815 """
819
816
820 def __iter__():
817 def __iter__():
821 """Iterate over the directories in this collection of paths."""
818 """Iterate over the directories in this collection of paths."""
822
819
823 def __contains__(path):
820 def __contains__(path):
824 """Whether a specific directory is in this collection."""
821 """Whether a specific directory is in this collection."""
825
822
826 class imanifestdict(interfaceutil.Interface):
823 class imanifestdict(interfaceutil.Interface):
827 """Interface representing a manifest data structure.
824 """Interface representing a manifest data structure.
828
825
829 A manifest is effectively a dict mapping paths to entries. Each entry
826 A manifest is effectively a dict mapping paths to entries. Each entry
830 consists of a binary node and extra flags affecting that entry.
827 consists of a binary node and extra flags affecting that entry.
831 """
828 """
832
829
833 def __getitem__(path):
830 def __getitem__(path):
834 """Returns the binary node value for a path in the manifest.
831 """Returns the binary node value for a path in the manifest.
835
832
836 Raises ``KeyError`` if the path does not exist in the manifest.
833 Raises ``KeyError`` if the path does not exist in the manifest.
837
834
838 Equivalent to ``self.find(path)[0]``.
835 Equivalent to ``self.find(path)[0]``.
839 """
836 """
840
837
841 def find(path):
838 def find(path):
842 """Returns the entry for a path in the manifest.
839 """Returns the entry for a path in the manifest.
843
840
844 Returns a 2-tuple of (node, flags).
841 Returns a 2-tuple of (node, flags).
845
842
846 Raises ``KeyError`` if the path does not exist in the manifest.
843 Raises ``KeyError`` if the path does not exist in the manifest.
847 """
844 """
848
845
849 def __len__():
846 def __len__():
850 """Return the number of entries in the manifest."""
847 """Return the number of entries in the manifest."""
851
848
852 def __nonzero__():
849 def __nonzero__():
853 """Returns True if the manifest has entries, False otherwise."""
850 """Returns True if the manifest has entries, False otherwise."""
854
851
855 __bool__ = __nonzero__
852 __bool__ = __nonzero__
856
853
857 def __setitem__(path, node):
854 def __setitem__(path, node):
858 """Define the node value for a path in the manifest.
855 """Define the node value for a path in the manifest.
859
856
860 If the path is already in the manifest, its flags will be copied to
857 If the path is already in the manifest, its flags will be copied to
861 the new entry.
858 the new entry.
862 """
859 """
863
860
864 def __contains__(path):
861 def __contains__(path):
865 """Whether a path exists in the manifest."""
862 """Whether a path exists in the manifest."""
866
863
867 def __delitem__(path):
864 def __delitem__(path):
868 """Remove a path from the manifest.
865 """Remove a path from the manifest.
869
866
870 Raises ``KeyError`` if the path is not in the manifest.
867 Raises ``KeyError`` if the path is not in the manifest.
871 """
868 """
872
869
873 def __iter__():
870 def __iter__():
874 """Iterate over paths in the manifest."""
871 """Iterate over paths in the manifest."""
875
872
876 def iterkeys():
873 def iterkeys():
877 """Iterate over paths in the manifest."""
874 """Iterate over paths in the manifest."""
878
875
879 def keys():
876 def keys():
880 """Obtain a list of paths in the manifest."""
877 """Obtain a list of paths in the manifest."""
881
878
882 def filesnotin(other, match=None):
879 def filesnotin(other, match=None):
883 """Obtain the set of paths in this manifest but not in another.
880 """Obtain the set of paths in this manifest but not in another.
884
881
885 ``match`` is an optional matcher function to be applied to both
882 ``match`` is an optional matcher function to be applied to both
886 manifests.
883 manifests.
887
884
888 Returns a set of paths.
885 Returns a set of paths.
889 """
886 """
890
887
891 def dirs():
888 def dirs():
892 """Returns an object implementing the ``idirs`` interface."""
889 """Returns an object implementing the ``idirs`` interface."""
893
890
894 def hasdir(dir):
891 def hasdir(dir):
895 """Returns a bool indicating if a directory is in this manifest."""
892 """Returns a bool indicating if a directory is in this manifest."""
896
893
897 def matches(match):
894 def matches(match):
898 """Generate a new manifest filtered through a matcher.
895 """Generate a new manifest filtered through a matcher.
899
896
900 Returns an object conforming to the ``imanifestdict`` interface.
897 Returns an object conforming to the ``imanifestdict`` interface.
901 """
898 """
902
899
903 def walk(match):
900 def walk(match):
904 """Generator of paths in manifest satisfying a matcher.
901 """Generator of paths in manifest satisfying a matcher.
905
902
906 This is equivalent to ``self.matches(match).iterkeys()`` except a new
903 This is equivalent to ``self.matches(match).iterkeys()`` except a new
907 manifest object is not created.
904 manifest object is not created.
908
905
909 If the matcher has explicit files listed and they don't exist in
906 If the matcher has explicit files listed and they don't exist in
910 the manifest, ``match.bad()`` is called for each missing file.
907 the manifest, ``match.bad()`` is called for each missing file.
911 """
908 """
912
909
913 def diff(other, match=None, clean=False):
910 def diff(other, match=None, clean=False):
914 """Find differences between this manifest and another.
911 """Find differences between this manifest and another.
915
912
916 This manifest is compared to ``other``.
913 This manifest is compared to ``other``.
917
914
918 If ``match`` is provided, the two manifests are filtered against this
915 If ``match`` is provided, the two manifests are filtered against this
919 matcher and only entries satisfying the matcher are compared.
916 matcher and only entries satisfying the matcher are compared.
920
917
921 If ``clean`` is True, unchanged files are included in the returned
918 If ``clean`` is True, unchanged files are included in the returned
922 object.
919 object.
923
920
924 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
921 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
925 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
922 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
926 represents the node and flags for this manifest and ``(node2, flag2)``
923 represents the node and flags for this manifest and ``(node2, flag2)``
927 are the same for the other manifest.
924 are the same for the other manifest.
928 """
925 """
929
926
930 def setflag(path, flag):
927 def setflag(path, flag):
931 """Set the flag value for a given path.
928 """Set the flag value for a given path.
932
929
933 Raises ``KeyError`` if the path is not already in the manifest.
930 Raises ``KeyError`` if the path is not already in the manifest.
934 """
931 """
935
932
936 def get(path, default=None):
933 def get(path, default=None):
937 """Obtain the node value for a path or a default value if missing."""
934 """Obtain the node value for a path or a default value if missing."""
938
935
939 def flags(path, default=''):
936 def flags(path, default=''):
940 """Return the flags value for a path or a default value if missing."""
937 """Return the flags value for a path or a default value if missing."""
941
938
942 def copy():
939 def copy():
943 """Return a copy of this manifest."""
940 """Return a copy of this manifest."""
944
941
945 def items():
942 def items():
946 """Returns an iterable of (path, node) for items in this manifest."""
943 """Returns an iterable of (path, node) for items in this manifest."""
947
944
948 def iteritems():
945 def iteritems():
949 """Identical to items()."""
946 """Identical to items()."""
950
947
951 def iterentries():
948 def iterentries():
952 """Returns an iterable of (path, node, flags) for this manifest.
949 """Returns an iterable of (path, node, flags) for this manifest.
953
950
954 Similar to ``iteritems()`` except items are a 3-tuple and include
951 Similar to ``iteritems()`` except items are a 3-tuple and include
955 flags.
952 flags.
956 """
953 """
957
954
958 def text():
955 def text():
959 """Obtain the raw data representation for this manifest.
956 """Obtain the raw data representation for this manifest.
960
957
961 Result is used to create a manifest revision.
958 Result is used to create a manifest revision.
962 """
959 """
963
960
964 def fastdelta(base, changes):
961 def fastdelta(base, changes):
965 """Obtain a delta between this manifest and another given changes.
962 """Obtain a delta between this manifest and another given changes.
966
963
967 ``base`` in the raw data representation for another manifest.
964 ``base`` in the raw data representation for another manifest.
968
965
969 ``changes`` is an iterable of ``(path, to_delete)``.
966 ``changes`` is an iterable of ``(path, to_delete)``.
970
967
971 Returns a 2-tuple containing ``bytearray(self.text())`` and the
968 Returns a 2-tuple containing ``bytearray(self.text())`` and the
972 delta between ``base`` and this manifest.
969 delta between ``base`` and this manifest.
973 """
970 """
974
971
975 class imanifestrevisionbase(interfaceutil.Interface):
972 class imanifestrevisionbase(interfaceutil.Interface):
976 """Base interface representing a single revision of a manifest.
973 """Base interface representing a single revision of a manifest.
977
974
978 Should not be used as a primary interface: should always be inherited
975 Should not be used as a primary interface: should always be inherited
979 as part of a larger interface.
976 as part of a larger interface.
980 """
977 """
981
978
982 def new():
979 def new():
983 """Obtain a new manifest instance.
980 """Obtain a new manifest instance.
984
981
985 Returns an object conforming to the ``imanifestrevisionwritable``
982 Returns an object conforming to the ``imanifestrevisionwritable``
986 interface. The instance will be associated with the same
983 interface. The instance will be associated with the same
987 ``imanifestlog`` collection as this instance.
984 ``imanifestlog`` collection as this instance.
988 """
985 """
989
986
990 def copy():
987 def copy():
991 """Obtain a copy of this manifest instance.
988 """Obtain a copy of this manifest instance.
992
989
993 Returns an object conforming to the ``imanifestrevisionwritable``
990 Returns an object conforming to the ``imanifestrevisionwritable``
994 interface. The instance will be associated with the same
991 interface. The instance will be associated with the same
995 ``imanifestlog`` collection as this instance.
992 ``imanifestlog`` collection as this instance.
996 """
993 """
997
994
998 def read():
995 def read():
999 """Obtain the parsed manifest data structure.
996 """Obtain the parsed manifest data structure.
1000
997
1001 The returned object conforms to the ``imanifestdict`` interface.
998 The returned object conforms to the ``imanifestdict`` interface.
1002 """
999 """
1003
1000
1004 class imanifestrevisionstored(imanifestrevisionbase):
1001 class imanifestrevisionstored(imanifestrevisionbase):
1005 """Interface representing a manifest revision committed to storage."""
1002 """Interface representing a manifest revision committed to storage."""
1006
1003
1007 def node():
1004 def node():
1008 """The binary node for this manifest."""
1005 """The binary node for this manifest."""
1009
1006
1010 parents = interfaceutil.Attribute(
1007 parents = interfaceutil.Attribute(
1011 """List of binary nodes that are parents for this manifest revision."""
1008 """List of binary nodes that are parents for this manifest revision."""
1012 )
1009 )
1013
1010
1014 def readdelta(shallow=False):
1011 def readdelta(shallow=False):
1015 """Obtain the manifest data structure representing changes from parent.
1012 """Obtain the manifest data structure representing changes from parent.
1016
1013
1017 This manifest is compared to its 1st parent. A new manifest representing
1014 This manifest is compared to its 1st parent. A new manifest representing
1018 those differences is constructed.
1015 those differences is constructed.
1019
1016
1020 The returned object conforms to the ``imanifestdict`` interface.
1017 The returned object conforms to the ``imanifestdict`` interface.
1021 """
1018 """
1022
1019
1023 def readfast(shallow=False):
1020 def readfast(shallow=False):
1024 """Calls either ``read()`` or ``readdelta()``.
1021 """Calls either ``read()`` or ``readdelta()``.
1025
1022
1026 The faster of the two options is called.
1023 The faster of the two options is called.
1027 """
1024 """
1028
1025
1029 def find(key):
1026 def find(key):
1030 """Calls self.read().find(key)``.
1027 """Calls self.read().find(key)``.
1031
1028
1032 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1029 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1033 """
1030 """
1034
1031
1035 class imanifestrevisionwritable(imanifestrevisionbase):
1032 class imanifestrevisionwritable(imanifestrevisionbase):
1036 """Interface representing a manifest revision that can be committed."""
1033 """Interface representing a manifest revision that can be committed."""
1037
1034
1038 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1035 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1039 """Add this revision to storage.
1036 """Add this revision to storage.
1040
1037
1041 Takes a transaction object, the changeset revision number it will
1038 Takes a transaction object, the changeset revision number it will
1042 be associated with, its parent nodes, and lists of added and
1039 be associated with, its parent nodes, and lists of added and
1043 removed paths.
1040 removed paths.
1044
1041
1045 If match is provided, storage can choose not to inspect or write out
1042 If match is provided, storage can choose not to inspect or write out
1046 items that do not match. Storage is still required to be able to provide
1043 items that do not match. Storage is still required to be able to provide
1047 the full manifest in the future for any directories written (these
1044 the full manifest in the future for any directories written (these
1048 manifests should not be "narrowed on disk").
1045 manifests should not be "narrowed on disk").
1049
1046
1050 Returns the binary node of the created revision.
1047 Returns the binary node of the created revision.
1051 """
1048 """
1052
1049
1053 class imanifeststorage(interfaceutil.Interface):
1050 class imanifeststorage(interfaceutil.Interface):
1054 """Storage interface for manifest data."""
1051 """Storage interface for manifest data."""
1055
1052
1056 tree = interfaceutil.Attribute(
1053 tree = interfaceutil.Attribute(
1057 """The path to the directory this manifest tracks.
1054 """The path to the directory this manifest tracks.
1058
1055
1059 The empty bytestring represents the root manifest.
1056 The empty bytestring represents the root manifest.
1060 """)
1057 """)
1061
1058
1062 index = interfaceutil.Attribute(
1059 index = interfaceutil.Attribute(
1063 """An ``ifilerevisionssequence`` instance.""")
1060 """An ``ifilerevisionssequence`` instance.""")
1064
1061
1065 indexfile = interfaceutil.Attribute(
1062 indexfile = interfaceutil.Attribute(
1066 """Path of revlog index file.
1063 """Path of revlog index file.
1067
1064
1068 TODO this is revlog specific and should not be exposed.
1065 TODO this is revlog specific and should not be exposed.
1069 """)
1066 """)
1070
1067
1071 opener = interfaceutil.Attribute(
1068 opener = interfaceutil.Attribute(
1072 """VFS opener to use to access underlying files used for storage.
1069 """VFS opener to use to access underlying files used for storage.
1073
1070
1074 TODO this is revlog specific and should not be exposed.
1071 TODO this is revlog specific and should not be exposed.
1075 """)
1072 """)
1076
1073
1077 version = interfaceutil.Attribute(
1074 version = interfaceutil.Attribute(
1078 """Revlog version number.
1075 """Revlog version number.
1079
1076
1080 TODO this is revlog specific and should not be exposed.
1077 TODO this is revlog specific and should not be exposed.
1081 """)
1078 """)
1082
1079
1083 _generaldelta = interfaceutil.Attribute(
1080 _generaldelta = interfaceutil.Attribute(
1084 """Whether generaldelta storage is being used.
1081 """Whether generaldelta storage is being used.
1085
1082
1086 TODO this is revlog specific and should not be exposed.
1083 TODO this is revlog specific and should not be exposed.
1087 """)
1084 """)
1088
1085
1089 fulltextcache = interfaceutil.Attribute(
1086 fulltextcache = interfaceutil.Attribute(
1090 """Dict with cache of fulltexts.
1087 """Dict with cache of fulltexts.
1091
1088
1092 TODO this doesn't feel appropriate for the storage interface.
1089 TODO this doesn't feel appropriate for the storage interface.
1093 """)
1090 """)
1094
1091
1095 def __len__():
1092 def __len__():
1096 """Obtain the number of revisions stored for this manifest."""
1093 """Obtain the number of revisions stored for this manifest."""
1097
1094
1098 def __iter__():
1095 def __iter__():
1099 """Iterate over revision numbers for this manifest."""
1096 """Iterate over revision numbers for this manifest."""
1100
1097
1101 def rev(node):
1098 def rev(node):
1102 """Obtain the revision number given a binary node.
1099 """Obtain the revision number given a binary node.
1103
1100
1104 Raises ``error.LookupError`` if the node is not known.
1101 Raises ``error.LookupError`` if the node is not known.
1105 """
1102 """
1106
1103
1107 def node(rev):
1104 def node(rev):
1108 """Obtain the node value given a revision number.
1105 """Obtain the node value given a revision number.
1109
1106
1110 Raises ``error.LookupError`` if the revision is not known.
1107 Raises ``error.LookupError`` if the revision is not known.
1111 """
1108 """
1112
1109
1113 def lookup(value):
1110 def lookup(value):
1114 """Attempt to resolve a value to a node.
1111 """Attempt to resolve a value to a node.
1115
1112
1116 Value can be a binary node, hex node, revision number, or a bytes
1113 Value can be a binary node, hex node, revision number, or a bytes
1117 that can be converted to an integer.
1114 that can be converted to an integer.
1118
1115
1119 Raises ``error.LookupError`` if a ndoe could not be resolved.
1116 Raises ``error.LookupError`` if a ndoe could not be resolved.
1120
1117
1121 TODO this is only used by debug* commands and can probably be deleted
1118 TODO this is only used by debug* commands and can probably be deleted
1122 easily.
1119 easily.
1123 """
1120 """
1124
1121
1125 def parents(node):
1122 def parents(node):
1126 """Returns a 2-tuple of parent nodes for a node.
1123 """Returns a 2-tuple of parent nodes for a node.
1127
1124
1128 Values will be ``nullid`` if the parent is empty.
1125 Values will be ``nullid`` if the parent is empty.
1129 """
1126 """
1130
1127
1131 def parentrevs(rev):
1128 def parentrevs(rev):
1132 """Like parents() but operates on revision numbers."""
1129 """Like parents() but operates on revision numbers."""
1133
1130
1134 def linkrev(rev):
1131 def linkrev(rev):
1135 """Obtain the changeset revision number a revision is linked to."""
1132 """Obtain the changeset revision number a revision is linked to."""
1136
1133
1137 def revision(node, _df=None, raw=False):
1134 def revision(node, _df=None, raw=False):
1138 """Obtain fulltext data for a node."""
1135 """Obtain fulltext data for a node."""
1139
1136
1140 def revdiff(rev1, rev2):
1137 def revdiff(rev1, rev2):
1141 """Obtain a delta between two revision numbers.
1138 """Obtain a delta between two revision numbers.
1142
1139
1143 The returned data is the result of ``bdiff.bdiff()`` on the raw
1140 The returned data is the result of ``bdiff.bdiff()`` on the raw
1144 revision data.
1141 revision data.
1145 """
1142 """
1146
1143
1147 def cmp(node, fulltext):
1144 def cmp(node, fulltext):
1148 """Compare fulltext to another revision.
1145 """Compare fulltext to another revision.
1149
1146
1150 Returns True if the fulltext is different from what is stored.
1147 Returns True if the fulltext is different from what is stored.
1151 """
1148 """
1152
1149
1153 def emitrevisions(nodes,
1150 def emitrevisions(nodes,
1154 nodesorder=None,
1151 nodesorder=None,
1155 revisiondata=False,
1152 revisiondata=False,
1156 assumehaveparentrevisions=False):
1153 assumehaveparentrevisions=False):
1157 """Produce ``irevisiondelta`` describing revisions.
1154 """Produce ``irevisiondelta`` describing revisions.
1158
1155
1159 See the documentation for ``ifiledata`` for more.
1156 See the documentation for ``ifiledata`` for more.
1160 """
1157 """
1161
1158
1162 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1159 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1163 """Process a series of deltas for storage.
1160 """Process a series of deltas for storage.
1164
1161
1165 See the documentation in ``ifilemutation`` for more.
1162 See the documentation in ``ifilemutation`` for more.
1166 """
1163 """
1167
1164
1168 def rawsize(rev):
1165 def rawsize(rev):
1169 """Obtain the size of tracked data.
1166 """Obtain the size of tracked data.
1170
1167
1171 Is equivalent to ``len(m.revision(node, raw=True))``.
1168 Is equivalent to ``len(m.revision(node, raw=True))``.
1172
1169
1173 TODO this method is only used by upgrade code and may be removed.
1170 TODO this method is only used by upgrade code and may be removed.
1174 """
1171 """
1175
1172
1176 def getstrippoint(minlink):
1173 def getstrippoint(minlink):
1177 """Find minimum revision that must be stripped to strip a linkrev.
1174 """Find minimum revision that must be stripped to strip a linkrev.
1178
1175
1179 See the documentation in ``ifilemutation`` for more.
1176 See the documentation in ``ifilemutation`` for more.
1180 """
1177 """
1181
1178
1182 def strip(minlink, transaction):
1179 def strip(minlink, transaction):
1183 """Remove storage of items starting at a linkrev.
1180 """Remove storage of items starting at a linkrev.
1184
1181
1185 See the documentation in ``ifilemutation`` for more.
1182 See the documentation in ``ifilemutation`` for more.
1186 """
1183 """
1187
1184
1188 def checksize():
1185 def checksize():
1189 """Obtain the expected sizes of backing files.
1186 """Obtain the expected sizes of backing files.
1190
1187
1191 TODO this is used by verify and it should not be part of the interface.
1188 TODO this is used by verify and it should not be part of the interface.
1192 """
1189 """
1193
1190
1194 def files():
1191 def files():
1195 """Obtain paths that are backing storage for this manifest.
1192 """Obtain paths that are backing storage for this manifest.
1196
1193
1197 TODO this is used by verify and there should probably be a better API
1194 TODO this is used by verify and there should probably be a better API
1198 for this functionality.
1195 for this functionality.
1199 """
1196 """
1200
1197
1201 def deltaparent(rev):
1198 def deltaparent(rev):
1202 """Obtain the revision that a revision is delta'd against.
1199 """Obtain the revision that a revision is delta'd against.
1203
1200
1204 TODO delta encoding is an implementation detail of storage and should
1201 TODO delta encoding is an implementation detail of storage and should
1205 not be exposed to the storage interface.
1202 not be exposed to the storage interface.
1206 """
1203 """
1207
1204
1208 def clone(tr, dest, **kwargs):
1205 def clone(tr, dest, **kwargs):
1209 """Clone this instance to another."""
1206 """Clone this instance to another."""
1210
1207
1211 def clearcaches(clear_persisted_data=False):
1208 def clearcaches(clear_persisted_data=False):
1212 """Clear any caches associated with this instance."""
1209 """Clear any caches associated with this instance."""
1213
1210
1214 def dirlog(d):
1211 def dirlog(d):
1215 """Obtain a manifest storage instance for a tree."""
1212 """Obtain a manifest storage instance for a tree."""
1216
1213
1217 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1214 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1218 match=None):
1215 match=None):
1219 """Add a revision to storage.
1216 """Add a revision to storage.
1220
1217
1221 ``m`` is an object conforming to ``imanifestdict``.
1218 ``m`` is an object conforming to ``imanifestdict``.
1222
1219
1223 ``link`` is the linkrev revision number.
1220 ``link`` is the linkrev revision number.
1224
1221
1225 ``p1`` and ``p2`` are the parent revision numbers.
1222 ``p1`` and ``p2`` are the parent revision numbers.
1226
1223
1227 ``added`` and ``removed`` are iterables of added and removed paths,
1224 ``added`` and ``removed`` are iterables of added and removed paths,
1228 respectively.
1225 respectively.
1229
1226
1230 ``readtree`` is a function that can be used to read the child tree(s)
1227 ``readtree`` is a function that can be used to read the child tree(s)
1231 when recursively writing the full tree structure when using
1228 when recursively writing the full tree structure when using
1232 treemanifets.
1229 treemanifets.
1233
1230
1234 ``match`` is a matcher that can be used to hint to storage that not all
1231 ``match`` is a matcher that can be used to hint to storage that not all
1235 paths must be inspected; this is an optimization and can be safely
1232 paths must be inspected; this is an optimization and can be safely
1236 ignored. Note that the storage must still be able to reproduce a full
1233 ignored. Note that the storage must still be able to reproduce a full
1237 manifest including files that did not match.
1234 manifest including files that did not match.
1238 """
1235 """
1239
1236
1240 def storageinfo(exclusivefiles=False, sharedfiles=False,
1237 def storageinfo(exclusivefiles=False, sharedfiles=False,
1241 revisionscount=False, trackedsize=False,
1238 revisionscount=False, trackedsize=False,
1242 storedsize=False):
1239 storedsize=False):
1243 """Obtain information about storage for this manifest's data.
1240 """Obtain information about storage for this manifest's data.
1244
1241
1245 See ``ifilestorage.storageinfo()`` for a description of this method.
1242 See ``ifilestorage.storageinfo()`` for a description of this method.
1246 This one behaves the same way, except for manifest data.
1243 This one behaves the same way, except for manifest data.
1247 """
1244 """
1248
1245
1249 class imanifestlog(interfaceutil.Interface):
1246 class imanifestlog(interfaceutil.Interface):
1250 """Interface representing a collection of manifest snapshots.
1247 """Interface representing a collection of manifest snapshots.
1251
1248
1252 Represents the root manifest in a repository.
1249 Represents the root manifest in a repository.
1253
1250
1254 Also serves as a means to access nested tree manifests and to cache
1251 Also serves as a means to access nested tree manifests and to cache
1255 tree manifests.
1252 tree manifests.
1256 """
1253 """
1257
1254
1258 def __getitem__(node):
1255 def __getitem__(node):
1259 """Obtain a manifest instance for a given binary node.
1256 """Obtain a manifest instance for a given binary node.
1260
1257
1261 Equivalent to calling ``self.get('', node)``.
1258 Equivalent to calling ``self.get('', node)``.
1262
1259
1263 The returned object conforms to the ``imanifestrevisionstored``
1260 The returned object conforms to the ``imanifestrevisionstored``
1264 interface.
1261 interface.
1265 """
1262 """
1266
1263
1267 def get(tree, node, verify=True):
1264 def get(tree, node, verify=True):
1268 """Retrieve the manifest instance for a given directory and binary node.
1265 """Retrieve the manifest instance for a given directory and binary node.
1269
1266
1270 ``node`` always refers to the node of the root manifest (which will be
1267 ``node`` always refers to the node of the root manifest (which will be
1271 the only manifest if flat manifests are being used).
1268 the only manifest if flat manifests are being used).
1272
1269
1273 If ``tree`` is the empty string, the root manifest is returned.
1270 If ``tree`` is the empty string, the root manifest is returned.
1274 Otherwise the manifest for the specified directory will be returned
1271 Otherwise the manifest for the specified directory will be returned
1275 (requires tree manifests).
1272 (requires tree manifests).
1276
1273
1277 If ``verify`` is True, ``LookupError`` is raised if the node is not
1274 If ``verify`` is True, ``LookupError`` is raised if the node is not
1278 known.
1275 known.
1279
1276
1280 The returned object conforms to the ``imanifestrevisionstored``
1277 The returned object conforms to the ``imanifestrevisionstored``
1281 interface.
1278 interface.
1282 """
1279 """
1283
1280
1284 def getstorage(tree):
1281 def getstorage(tree):
1285 """Retrieve an interface to storage for a particular tree.
1282 """Retrieve an interface to storage for a particular tree.
1286
1283
1287 If ``tree`` is the empty bytestring, storage for the root manifest will
1284 If ``tree`` is the empty bytestring, storage for the root manifest will
1288 be returned. Otherwise storage for a tree manifest is returned.
1285 be returned. Otherwise storage for a tree manifest is returned.
1289
1286
1290 TODO formalize interface for returned object.
1287 TODO formalize interface for returned object.
1291 """
1288 """
1292
1289
1293 def clearcaches():
1290 def clearcaches():
1294 """Clear caches associated with this collection."""
1291 """Clear caches associated with this collection."""
1295
1292
1296 def rev(node):
1293 def rev(node):
1297 """Obtain the revision number for a binary node.
1294 """Obtain the revision number for a binary node.
1298
1295
1299 Raises ``error.LookupError`` if the node is not known.
1296 Raises ``error.LookupError`` if the node is not known.
1300 """
1297 """
1301
1298
1302 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1299 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1303 """Local repository sub-interface providing access to tracked file storage.
1300 """Local repository sub-interface providing access to tracked file storage.
1304
1301
1305 This interface defines how a repository accesses storage for a single
1302 This interface defines how a repository accesses storage for a single
1306 tracked file path.
1303 tracked file path.
1307 """
1304 """
1308
1305
1309 def file(f):
1306 def file(f):
1310 """Obtain a filelog for a tracked path.
1307 """Obtain a filelog for a tracked path.
1311
1308
1312 The returned type conforms to the ``ifilestorage`` interface.
1309 The returned type conforms to the ``ifilestorage`` interface.
1313 """
1310 """
1314
1311
1315 class ilocalrepositorymain(interfaceutil.Interface):
1312 class ilocalrepositorymain(interfaceutil.Interface):
1316 """Main interface for local repositories.
1313 """Main interface for local repositories.
1317
1314
1318 This currently captures the reality of things - not how things should be.
1315 This currently captures the reality of things - not how things should be.
1319 """
1316 """
1320
1317
1321 supportedformats = interfaceutil.Attribute(
1318 supportedformats = interfaceutil.Attribute(
1322 """Set of requirements that apply to stream clone.
1319 """Set of requirements that apply to stream clone.
1323
1320
1324 This is actually a class attribute and is shared among all instances.
1321 This is actually a class attribute and is shared among all instances.
1325 """)
1322 """)
1326
1323
1327 supported = interfaceutil.Attribute(
1324 supported = interfaceutil.Attribute(
1328 """Set of requirements that this repo is capable of opening.""")
1325 """Set of requirements that this repo is capable of opening.""")
1329
1326
1330 requirements = interfaceutil.Attribute(
1327 requirements = interfaceutil.Attribute(
1331 """Set of requirements this repo uses.""")
1328 """Set of requirements this repo uses.""")
1332
1329
1333 features = interfaceutil.Attribute(
1330 features = interfaceutil.Attribute(
1334 """Set of "features" this repository supports.
1331 """Set of "features" this repository supports.
1335
1332
1336 A "feature" is a loosely-defined term. It can refer to a feature
1333 A "feature" is a loosely-defined term. It can refer to a feature
1337 in the classical sense or can describe an implementation detail
1334 in the classical sense or can describe an implementation detail
1338 of the repository. For example, a ``readonly`` feature may denote
1335 of the repository. For example, a ``readonly`` feature may denote
1339 the repository as read-only. Or a ``revlogfilestore`` feature may
1336 the repository as read-only. Or a ``revlogfilestore`` feature may
1340 denote that the repository is using revlogs for file storage.
1337 denote that the repository is using revlogs for file storage.
1341
1338
1342 The intent of features is to provide a machine-queryable mechanism
1339 The intent of features is to provide a machine-queryable mechanism
1343 for repo consumers to test for various repository characteristics.
1340 for repo consumers to test for various repository characteristics.
1344
1341
1345 Features are similar to ``requirements``. The main difference is that
1342 Features are similar to ``requirements``. The main difference is that
1346 requirements are stored on-disk and represent requirements to open the
1343 requirements are stored on-disk and represent requirements to open the
1347 repository. Features are more run-time capabilities of the repository
1344 repository. Features are more run-time capabilities of the repository
1348 and more granular capabilities (which may be derived from requirements).
1345 and more granular capabilities (which may be derived from requirements).
1349 """)
1346 """)
1350
1347
1351 filtername = interfaceutil.Attribute(
1348 filtername = interfaceutil.Attribute(
1352 """Name of the repoview that is active on this repo.""")
1349 """Name of the repoview that is active on this repo.""")
1353
1350
1354 wvfs = interfaceutil.Attribute(
1351 wvfs = interfaceutil.Attribute(
1355 """VFS used to access the working directory.""")
1352 """VFS used to access the working directory.""")
1356
1353
1357 vfs = interfaceutil.Attribute(
1354 vfs = interfaceutil.Attribute(
1358 """VFS rooted at the .hg directory.
1355 """VFS rooted at the .hg directory.
1359
1356
1360 Used to access repository data not in the store.
1357 Used to access repository data not in the store.
1361 """)
1358 """)
1362
1359
1363 svfs = interfaceutil.Attribute(
1360 svfs = interfaceutil.Attribute(
1364 """VFS rooted at the store.
1361 """VFS rooted at the store.
1365
1362
1366 Used to access repository data in the store. Typically .hg/store.
1363 Used to access repository data in the store. Typically .hg/store.
1367 But can point elsewhere if the store is shared.
1364 But can point elsewhere if the store is shared.
1368 """)
1365 """)
1369
1366
1370 root = interfaceutil.Attribute(
1367 root = interfaceutil.Attribute(
1371 """Path to the root of the working directory.""")
1368 """Path to the root of the working directory.""")
1372
1369
1373 path = interfaceutil.Attribute(
1370 path = interfaceutil.Attribute(
1374 """Path to the .hg directory.""")
1371 """Path to the .hg directory.""")
1375
1372
1376 origroot = interfaceutil.Attribute(
1373 origroot = interfaceutil.Attribute(
1377 """The filesystem path that was used to construct the repo.""")
1374 """The filesystem path that was used to construct the repo.""")
1378
1375
1379 auditor = interfaceutil.Attribute(
1376 auditor = interfaceutil.Attribute(
1380 """A pathauditor for the working directory.
1377 """A pathauditor for the working directory.
1381
1378
1382 This checks if a path refers to a nested repository.
1379 This checks if a path refers to a nested repository.
1383
1380
1384 Operates on the filesystem.
1381 Operates on the filesystem.
1385 """)
1382 """)
1386
1383
1387 nofsauditor = interfaceutil.Attribute(
1384 nofsauditor = interfaceutil.Attribute(
1388 """A pathauditor for the working directory.
1385 """A pathauditor for the working directory.
1389
1386
1390 This is like ``auditor`` except it doesn't do filesystem checks.
1387 This is like ``auditor`` except it doesn't do filesystem checks.
1391 """)
1388 """)
1392
1389
1393 baseui = interfaceutil.Attribute(
1390 baseui = interfaceutil.Attribute(
1394 """Original ui instance passed into constructor.""")
1391 """Original ui instance passed into constructor.""")
1395
1392
1396 ui = interfaceutil.Attribute(
1393 ui = interfaceutil.Attribute(
1397 """Main ui instance for this instance.""")
1394 """Main ui instance for this instance.""")
1398
1395
1399 sharedpath = interfaceutil.Attribute(
1396 sharedpath = interfaceutil.Attribute(
1400 """Path to the .hg directory of the repo this repo was shared from.""")
1397 """Path to the .hg directory of the repo this repo was shared from.""")
1401
1398
1402 store = interfaceutil.Attribute(
1399 store = interfaceutil.Attribute(
1403 """A store instance.""")
1400 """A store instance.""")
1404
1401
1405 spath = interfaceutil.Attribute(
1402 spath = interfaceutil.Attribute(
1406 """Path to the store.""")
1403 """Path to the store.""")
1407
1404
1408 sjoin = interfaceutil.Attribute(
1405 sjoin = interfaceutil.Attribute(
1409 """Alias to self.store.join.""")
1406 """Alias to self.store.join.""")
1410
1407
1411 cachevfs = interfaceutil.Attribute(
1408 cachevfs = interfaceutil.Attribute(
1412 """A VFS used to access the cache directory.
1409 """A VFS used to access the cache directory.
1413
1410
1414 Typically .hg/cache.
1411 Typically .hg/cache.
1415 """)
1412 """)
1416
1413
1417 filteredrevcache = interfaceutil.Attribute(
1414 filteredrevcache = interfaceutil.Attribute(
1418 """Holds sets of revisions to be filtered.""")
1415 """Holds sets of revisions to be filtered.""")
1419
1416
1420 names = interfaceutil.Attribute(
1417 names = interfaceutil.Attribute(
1421 """A ``namespaces`` instance.""")
1418 """A ``namespaces`` instance.""")
1422
1419
1423 def close():
1420 def close():
1424 """Close the handle on this repository."""
1421 """Close the handle on this repository."""
1425
1422
1426 def peer():
1423 def peer():
1427 """Obtain an object conforming to the ``peer`` interface."""
1424 """Obtain an object conforming to the ``peer`` interface."""
1428
1425
1429 def unfiltered():
1426 def unfiltered():
1430 """Obtain an unfiltered/raw view of this repo."""
1427 """Obtain an unfiltered/raw view of this repo."""
1431
1428
1432 def filtered(name, visibilityexceptions=None):
1429 def filtered(name, visibilityexceptions=None):
1433 """Obtain a named view of this repository."""
1430 """Obtain a named view of this repository."""
1434
1431
1435 obsstore = interfaceutil.Attribute(
1432 obsstore = interfaceutil.Attribute(
1436 """A store of obsolescence data.""")
1433 """A store of obsolescence data.""")
1437
1434
1438 changelog = interfaceutil.Attribute(
1435 changelog = interfaceutil.Attribute(
1439 """A handle on the changelog revlog.""")
1436 """A handle on the changelog revlog.""")
1440
1437
1441 manifestlog = interfaceutil.Attribute(
1438 manifestlog = interfaceutil.Attribute(
1442 """An instance conforming to the ``imanifestlog`` interface.
1439 """An instance conforming to the ``imanifestlog`` interface.
1443
1440
1444 Provides access to manifests for the repository.
1441 Provides access to manifests for the repository.
1445 """)
1442 """)
1446
1443
1447 dirstate = interfaceutil.Attribute(
1444 dirstate = interfaceutil.Attribute(
1448 """Working directory state.""")
1445 """Working directory state.""")
1449
1446
1450 narrowpats = interfaceutil.Attribute(
1447 narrowpats = interfaceutil.Attribute(
1451 """Matcher patterns for this repository's narrowspec.""")
1448 """Matcher patterns for this repository's narrowspec.""")
1452
1449
1453 def narrowmatch():
1450 def narrowmatch():
1454 """Obtain a matcher for the narrowspec."""
1451 """Obtain a matcher for the narrowspec."""
1455
1452
1456 def setnarrowpats(newincludes, newexcludes):
1453 def setnarrowpats(newincludes, newexcludes):
1457 """Define the narrowspec for this repository."""
1454 """Define the narrowspec for this repository."""
1458
1455
1459 def __getitem__(changeid):
1456 def __getitem__(changeid):
1460 """Try to resolve a changectx."""
1457 """Try to resolve a changectx."""
1461
1458
1462 def __contains__(changeid):
1459 def __contains__(changeid):
1463 """Whether a changeset exists."""
1460 """Whether a changeset exists."""
1464
1461
1465 def __nonzero__():
1462 def __nonzero__():
1466 """Always returns True."""
1463 """Always returns True."""
1467 return True
1464 return True
1468
1465
1469 __bool__ = __nonzero__
1466 __bool__ = __nonzero__
1470
1467
1471 def __len__():
1468 def __len__():
1472 """Returns the number of changesets in the repo."""
1469 """Returns the number of changesets in the repo."""
1473
1470
1474 def __iter__():
1471 def __iter__():
1475 """Iterate over revisions in the changelog."""
1472 """Iterate over revisions in the changelog."""
1476
1473
1477 def revs(expr, *args):
1474 def revs(expr, *args):
1478 """Evaluate a revset.
1475 """Evaluate a revset.
1479
1476
1480 Emits revisions.
1477 Emits revisions.
1481 """
1478 """
1482
1479
1483 def set(expr, *args):
1480 def set(expr, *args):
1484 """Evaluate a revset.
1481 """Evaluate a revset.
1485
1482
1486 Emits changectx instances.
1483 Emits changectx instances.
1487 """
1484 """
1488
1485
1489 def anyrevs(specs, user=False, localalias=None):
1486 def anyrevs(specs, user=False, localalias=None):
1490 """Find revisions matching one of the given revsets."""
1487 """Find revisions matching one of the given revsets."""
1491
1488
1492 def url():
1489 def url():
1493 """Returns a string representing the location of this repo."""
1490 """Returns a string representing the location of this repo."""
1494
1491
1495 def hook(name, throw=False, **args):
1492 def hook(name, throw=False, **args):
1496 """Call a hook."""
1493 """Call a hook."""
1497
1494
1498 def tags():
1495 def tags():
1499 """Return a mapping of tag to node."""
1496 """Return a mapping of tag to node."""
1500
1497
1501 def tagtype(tagname):
1498 def tagtype(tagname):
1502 """Return the type of a given tag."""
1499 """Return the type of a given tag."""
1503
1500
1504 def tagslist():
1501 def tagslist():
1505 """Return a list of tags ordered by revision."""
1502 """Return a list of tags ordered by revision."""
1506
1503
1507 def nodetags(node):
1504 def nodetags(node):
1508 """Return the tags associated with a node."""
1505 """Return the tags associated with a node."""
1509
1506
1510 def nodebookmarks(node):
1507 def nodebookmarks(node):
1511 """Return the list of bookmarks pointing to the specified node."""
1508 """Return the list of bookmarks pointing to the specified node."""
1512
1509
1513 def branchmap():
1510 def branchmap():
1514 """Return a mapping of branch to heads in that branch."""
1511 """Return a mapping of branch to heads in that branch."""
1515
1512
1516 def revbranchcache():
1513 def revbranchcache():
1517 pass
1514 pass
1518
1515
1519 def branchtip(branchtip, ignoremissing=False):
1516 def branchtip(branchtip, ignoremissing=False):
1520 """Return the tip node for a given branch."""
1517 """Return the tip node for a given branch."""
1521
1518
1522 def lookup(key):
1519 def lookup(key):
1523 """Resolve the node for a revision."""
1520 """Resolve the node for a revision."""
1524
1521
1525 def lookupbranch(key):
1522 def lookupbranch(key):
1526 """Look up the branch name of the given revision or branch name."""
1523 """Look up the branch name of the given revision or branch name."""
1527
1524
1528 def known(nodes):
1525 def known(nodes):
1529 """Determine whether a series of nodes is known.
1526 """Determine whether a series of nodes is known.
1530
1527
1531 Returns a list of bools.
1528 Returns a list of bools.
1532 """
1529 """
1533
1530
1534 def local():
1531 def local():
1535 """Whether the repository is local."""
1532 """Whether the repository is local."""
1536 return True
1533 return True
1537
1534
1538 def publishing():
1535 def publishing():
1539 """Whether the repository is a publishing repository."""
1536 """Whether the repository is a publishing repository."""
1540
1537
1541 def cancopy():
1538 def cancopy():
1542 pass
1539 pass
1543
1540
1544 def shared():
1541 def shared():
1545 """The type of shared repository or None."""
1542 """The type of shared repository or None."""
1546
1543
1547 def wjoin(f, *insidef):
1544 def wjoin(f, *insidef):
1548 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1545 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1549
1546
1550 def setparents(p1, p2):
1547 def setparents(p1, p2):
1551 """Set the parent nodes of the working directory."""
1548 """Set the parent nodes of the working directory."""
1552
1549
1553 def filectx(path, changeid=None, fileid=None):
1550 def filectx(path, changeid=None, fileid=None):
1554 """Obtain a filectx for the given file revision."""
1551 """Obtain a filectx for the given file revision."""
1555
1552
1556 def getcwd():
1553 def getcwd():
1557 """Obtain the current working directory from the dirstate."""
1554 """Obtain the current working directory from the dirstate."""
1558
1555
1559 def pathto(f, cwd=None):
1556 def pathto(f, cwd=None):
1560 """Obtain the relative path to a file."""
1557 """Obtain the relative path to a file."""
1561
1558
1562 def adddatafilter(name, fltr):
1559 def adddatafilter(name, fltr):
1563 pass
1560 pass
1564
1561
1565 def wread(filename):
1562 def wread(filename):
1566 """Read a file from wvfs, using data filters."""
1563 """Read a file from wvfs, using data filters."""
1567
1564
1568 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1565 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1569 """Write data to a file in the wvfs, using data filters."""
1566 """Write data to a file in the wvfs, using data filters."""
1570
1567
1571 def wwritedata(filename, data):
1568 def wwritedata(filename, data):
1572 """Resolve data for writing to the wvfs, using data filters."""
1569 """Resolve data for writing to the wvfs, using data filters."""
1573
1570
1574 def currenttransaction():
1571 def currenttransaction():
1575 """Obtain the current transaction instance or None."""
1572 """Obtain the current transaction instance or None."""
1576
1573
1577 def transaction(desc, report=None):
1574 def transaction(desc, report=None):
1578 """Open a new transaction to write to the repository."""
1575 """Open a new transaction to write to the repository."""
1579
1576
1580 def undofiles():
1577 def undofiles():
1581 """Returns a list of (vfs, path) for files to undo transactions."""
1578 """Returns a list of (vfs, path) for files to undo transactions."""
1582
1579
1583 def recover():
1580 def recover():
1584 """Roll back an interrupted transaction."""
1581 """Roll back an interrupted transaction."""
1585
1582
1586 def rollback(dryrun=False, force=False):
1583 def rollback(dryrun=False, force=False):
1587 """Undo the last transaction.
1584 """Undo the last transaction.
1588
1585
1589 DANGEROUS.
1586 DANGEROUS.
1590 """
1587 """
1591
1588
1592 def updatecaches(tr=None, full=False):
1589 def updatecaches(tr=None, full=False):
1593 """Warm repo caches."""
1590 """Warm repo caches."""
1594
1591
1595 def invalidatecaches():
1592 def invalidatecaches():
1596 """Invalidate cached data due to the repository mutating."""
1593 """Invalidate cached data due to the repository mutating."""
1597
1594
1598 def invalidatevolatilesets():
1595 def invalidatevolatilesets():
1599 pass
1596 pass
1600
1597
1601 def invalidatedirstate():
1598 def invalidatedirstate():
1602 """Invalidate the dirstate."""
1599 """Invalidate the dirstate."""
1603
1600
1604 def invalidate(clearfilecache=False):
1601 def invalidate(clearfilecache=False):
1605 pass
1602 pass
1606
1603
1607 def invalidateall():
1604 def invalidateall():
1608 pass
1605 pass
1609
1606
1610 def lock(wait=True):
1607 def lock(wait=True):
1611 """Lock the repository store and return a lock instance."""
1608 """Lock the repository store and return a lock instance."""
1612
1609
1613 def wlock(wait=True):
1610 def wlock(wait=True):
1614 """Lock the non-store parts of the repository."""
1611 """Lock the non-store parts of the repository."""
1615
1612
1616 def currentwlock():
1613 def currentwlock():
1617 """Return the wlock if it's held or None."""
1614 """Return the wlock if it's held or None."""
1618
1615
1619 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1616 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1620 pass
1617 pass
1621
1618
1622 def commit(text='', user=None, date=None, match=None, force=False,
1619 def commit(text='', user=None, date=None, match=None, force=False,
1623 editor=False, extra=None):
1620 editor=False, extra=None):
1624 """Add a new revision to the repository."""
1621 """Add a new revision to the repository."""
1625
1622
1626 def commitctx(ctx, error=False):
1623 def commitctx(ctx, error=False):
1627 """Commit a commitctx instance to the repository."""
1624 """Commit a commitctx instance to the repository."""
1628
1625
1629 def destroying():
1626 def destroying():
1630 """Inform the repository that nodes are about to be destroyed."""
1627 """Inform the repository that nodes are about to be destroyed."""
1631
1628
1632 def destroyed():
1629 def destroyed():
1633 """Inform the repository that nodes have been destroyed."""
1630 """Inform the repository that nodes have been destroyed."""
1634
1631
1635 def status(node1='.', node2=None, match=None, ignored=False,
1632 def status(node1='.', node2=None, match=None, ignored=False,
1636 clean=False, unknown=False, listsubrepos=False):
1633 clean=False, unknown=False, listsubrepos=False):
1637 """Convenience method to call repo[x].status()."""
1634 """Convenience method to call repo[x].status()."""
1638
1635
1639 def addpostdsstatus(ps):
1636 def addpostdsstatus(ps):
1640 pass
1637 pass
1641
1638
1642 def postdsstatus():
1639 def postdsstatus():
1643 pass
1640 pass
1644
1641
1645 def clearpostdsstatus():
1642 def clearpostdsstatus():
1646 pass
1643 pass
1647
1644
1648 def heads(start=None):
1645 def heads(start=None):
1649 """Obtain list of nodes that are DAG heads."""
1646 """Obtain list of nodes that are DAG heads."""
1650
1647
1651 def branchheads(branch=None, start=None, closed=False):
1648 def branchheads(branch=None, start=None, closed=False):
1652 pass
1649 pass
1653
1650
1654 def branches(nodes):
1651 def branches(nodes):
1655 pass
1652 pass
1656
1653
1657 def between(pairs):
1654 def between(pairs):
1658 pass
1655 pass
1659
1656
1660 def checkpush(pushop):
1657 def checkpush(pushop):
1661 pass
1658 pass
1662
1659
1663 prepushoutgoinghooks = interfaceutil.Attribute(
1660 prepushoutgoinghooks = interfaceutil.Attribute(
1664 """util.hooks instance.""")
1661 """util.hooks instance.""")
1665
1662
1666 def pushkey(namespace, key, old, new):
1663 def pushkey(namespace, key, old, new):
1667 pass
1664 pass
1668
1665
1669 def listkeys(namespace):
1666 def listkeys(namespace):
1670 pass
1667 pass
1671
1668
1672 def debugwireargs(one, two, three=None, four=None, five=None):
1669 def debugwireargs(one, two, three=None, four=None, five=None):
1673 pass
1670 pass
1674
1671
1675 def savecommitmessage(text):
1672 def savecommitmessage(text):
1676 pass
1673 pass
1677
1674
1678 class completelocalrepository(ilocalrepositorymain,
1675 class completelocalrepository(ilocalrepositorymain,
1679 ilocalrepositoryfilestorage):
1676 ilocalrepositoryfilestorage):
1680 """Complete interface for a local repository."""
1677 """Complete interface for a local repository."""
@@ -1,1072 +1,1057 b''
1 # storage.py - Testing of storage primitives.
1 # storage.py - Testing of storage primitives.
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import unittest
10 import unittest
11
11
12 from ..node import (
12 from ..node import (
13 hex,
13 hex,
14 nullid,
14 nullid,
15 nullrev,
15 nullrev,
16 )
16 )
17 from .. import (
17 from .. import (
18 error,
18 error,
19 mdiff,
19 mdiff,
20 revlog,
20 revlog,
21 )
21 )
22
22
23 class basetestcase(unittest.TestCase):
23 class basetestcase(unittest.TestCase):
24 if not getattr(unittest.TestCase, r'assertRaisesRegex', False):
24 if not getattr(unittest.TestCase, r'assertRaisesRegex', False):
25 assertRaisesRegex = (# camelcase-required
25 assertRaisesRegex = (# camelcase-required
26 unittest.TestCase.assertRaisesRegexp)
26 unittest.TestCase.assertRaisesRegexp)
27
27
28 class ifileindextests(basetestcase):
28 class ifileindextests(basetestcase):
29 """Generic tests for the ifileindex interface.
29 """Generic tests for the ifileindex interface.
30
30
31 All file storage backends for index data should conform to the tests in this
31 All file storage backends for index data should conform to the tests in this
32 class.
32 class.
33
33
34 Use ``makeifileindextests()`` to create an instance of this type.
34 Use ``makeifileindextests()`` to create an instance of this type.
35 """
35 """
36 def testempty(self):
36 def testempty(self):
37 f = self._makefilefn()
37 f = self._makefilefn()
38 self.assertEqual(len(f), 0, 'new file store has 0 length by default')
38 self.assertEqual(len(f), 0, 'new file store has 0 length by default')
39 self.assertEqual(list(f), [], 'iter yields nothing by default')
39 self.assertEqual(list(f), [], 'iter yields nothing by default')
40
40
41 gen = iter(f)
41 gen = iter(f)
42 with self.assertRaises(StopIteration):
42 with self.assertRaises(StopIteration):
43 next(gen)
43 next(gen)
44
44
45 # revs() should evaluate to an empty list.
45 # revs() should evaluate to an empty list.
46 self.assertEqual(list(f.revs()), [])
46 self.assertEqual(list(f.revs()), [])
47
47
48 revs = iter(f.revs())
48 revs = iter(f.revs())
49 with self.assertRaises(StopIteration):
49 with self.assertRaises(StopIteration):
50 next(revs)
50 next(revs)
51
51
52 self.assertEqual(list(f.revs(start=20)), [])
52 self.assertEqual(list(f.revs(start=20)), [])
53
53
54 # parents() and parentrevs() work with nullid/nullrev.
54 # parents() and parentrevs() work with nullid/nullrev.
55 self.assertEqual(f.parents(nullid), (nullid, nullid))
55 self.assertEqual(f.parents(nullid), (nullid, nullid))
56 self.assertEqual(f.parentrevs(nullrev), (nullrev, nullrev))
56 self.assertEqual(f.parentrevs(nullrev), (nullrev, nullrev))
57
57
58 with self.assertRaises(error.LookupError):
58 with self.assertRaises(error.LookupError):
59 f.parents(b'\x01' * 20)
59 f.parents(b'\x01' * 20)
60
60
61 for i in range(-5, 5):
61 for i in range(-5, 5):
62 if i == nullrev:
62 if i == nullrev:
63 continue
63 continue
64
64
65 with self.assertRaises(IndexError):
65 with self.assertRaises(IndexError):
66 f.parentrevs(i)
66 f.parentrevs(i)
67
67
68 # nullid/nullrev lookup always works.
68 # nullid/nullrev lookup always works.
69 self.assertEqual(f.rev(nullid), nullrev)
69 self.assertEqual(f.rev(nullid), nullrev)
70 self.assertEqual(f.node(nullrev), nullid)
70 self.assertEqual(f.node(nullrev), nullid)
71
71
72 with self.assertRaises(error.LookupError):
72 with self.assertRaises(error.LookupError):
73 f.rev(b'\x01' * 20)
73 f.rev(b'\x01' * 20)
74
74
75 for i in range(-5, 5):
75 for i in range(-5, 5):
76 if i == nullrev:
76 if i == nullrev:
77 continue
77 continue
78
78
79 with self.assertRaises(IndexError):
79 with self.assertRaises(IndexError):
80 f.node(i)
80 f.node(i)
81
81
82 self.assertEqual(f.lookup(nullid), nullid)
82 self.assertEqual(f.lookup(nullid), nullid)
83 self.assertEqual(f.lookup(nullrev), nullid)
83 self.assertEqual(f.lookup(nullrev), nullid)
84 self.assertEqual(f.lookup(hex(nullid)), nullid)
84 self.assertEqual(f.lookup(hex(nullid)), nullid)
85
85
86 # String converted to integer doesn't work for nullrev.
86 # String converted to integer doesn't work for nullrev.
87 with self.assertRaises(error.LookupError):
87 with self.assertRaises(error.LookupError):
88 f.lookup(b'%d' % nullrev)
88 f.lookup(b'%d' % nullrev)
89
89
90 self.assertEqual(f.linkrev(nullrev), nullrev)
90 self.assertEqual(f.linkrev(nullrev), nullrev)
91
91
92 for i in range(-5, 5):
92 for i in range(-5, 5):
93 if i == nullrev:
93 if i == nullrev:
94 continue
94 continue
95
95
96 with self.assertRaises(IndexError):
96 with self.assertRaises(IndexError):
97 f.linkrev(i)
97 f.linkrev(i)
98
98
99 self.assertFalse(f.iscensored(nullrev))
99 self.assertFalse(f.iscensored(nullrev))
100
100
101 for i in range(-5, 5):
101 for i in range(-5, 5):
102 if i == nullrev:
102 if i == nullrev:
103 continue
103 continue
104
104
105 with self.assertRaises(IndexError):
105 with self.assertRaises(IndexError):
106 f.iscensored(i)
106 f.iscensored(i)
107
107
108 self.assertEqual(list(f.commonancestorsheads(nullid, nullid)), [])
108 self.assertEqual(list(f.commonancestorsheads(nullid, nullid)), [])
109
109
110 with self.assertRaises(ValueError):
110 with self.assertRaises(ValueError):
111 self.assertEqual(list(f.descendants([])), [])
111 self.assertEqual(list(f.descendants([])), [])
112
112
113 self.assertEqual(list(f.descendants([nullrev])), [])
113 self.assertEqual(list(f.descendants([nullrev])), [])
114
114
115 self.assertEqual(f.heads(), [nullid])
115 self.assertEqual(f.heads(), [nullid])
116 self.assertEqual(f.heads(nullid), [nullid])
116 self.assertEqual(f.heads(nullid), [nullid])
117 self.assertEqual(f.heads(None, [nullid]), [nullid])
117 self.assertEqual(f.heads(None, [nullid]), [nullid])
118 self.assertEqual(f.heads(nullid, [nullid]), [nullid])
118 self.assertEqual(f.heads(nullid, [nullid]), [nullid])
119
119
120 self.assertEqual(f.children(nullid), [])
120 self.assertEqual(f.children(nullid), [])
121
121
122 with self.assertRaises(error.LookupError):
122 with self.assertRaises(error.LookupError):
123 f.children(b'\x01' * 20)
123 f.children(b'\x01' * 20)
124
124
125 self.assertEqual(f.deltaparent(nullrev), nullrev)
126
127 for i in range(-5, 5):
128 if i == nullrev:
129 continue
130
131 with self.assertRaises(IndexError):
132 f.deltaparent(i)
133
134 def testsinglerevision(self):
125 def testsinglerevision(self):
135 f = self._makefilefn()
126 f = self._makefilefn()
136 with self._maketransactionfn() as tr:
127 with self._maketransactionfn() as tr:
137 node = f.add(b'initial', None, tr, 0, nullid, nullid)
128 node = f.add(b'initial', None, tr, 0, nullid, nullid)
138
129
139 self.assertEqual(len(f), 1)
130 self.assertEqual(len(f), 1)
140 self.assertEqual(list(f), [0])
131 self.assertEqual(list(f), [0])
141
132
142 gen = iter(f)
133 gen = iter(f)
143 self.assertEqual(next(gen), 0)
134 self.assertEqual(next(gen), 0)
144
135
145 with self.assertRaises(StopIteration):
136 with self.assertRaises(StopIteration):
146 next(gen)
137 next(gen)
147
138
148 self.assertEqual(list(f.revs()), [0])
139 self.assertEqual(list(f.revs()), [0])
149 self.assertEqual(list(f.revs(start=1)), [])
140 self.assertEqual(list(f.revs(start=1)), [])
150 self.assertEqual(list(f.revs(start=0)), [0])
141 self.assertEqual(list(f.revs(start=0)), [0])
151 self.assertEqual(list(f.revs(stop=0)), [0])
142 self.assertEqual(list(f.revs(stop=0)), [0])
152 self.assertEqual(list(f.revs(stop=1)), [0])
143 self.assertEqual(list(f.revs(stop=1)), [0])
153 self.assertEqual(list(f.revs(1, 1)), [])
144 self.assertEqual(list(f.revs(1, 1)), [])
154 # TODO buggy
145 # TODO buggy
155 self.assertEqual(list(f.revs(1, 0)), [1, 0])
146 self.assertEqual(list(f.revs(1, 0)), [1, 0])
156 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
147 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
157
148
158 self.assertEqual(f.parents(node), (nullid, nullid))
149 self.assertEqual(f.parents(node), (nullid, nullid))
159 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
150 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
160
151
161 with self.assertRaises(error.LookupError):
152 with self.assertRaises(error.LookupError):
162 f.parents(b'\x01' * 20)
153 f.parents(b'\x01' * 20)
163
154
164 with self.assertRaises(IndexError):
155 with self.assertRaises(IndexError):
165 f.parentrevs(1)
156 f.parentrevs(1)
166
157
167 self.assertEqual(f.rev(node), 0)
158 self.assertEqual(f.rev(node), 0)
168
159
169 with self.assertRaises(error.LookupError):
160 with self.assertRaises(error.LookupError):
170 f.rev(b'\x01' * 20)
161 f.rev(b'\x01' * 20)
171
162
172 self.assertEqual(f.node(0), node)
163 self.assertEqual(f.node(0), node)
173
164
174 with self.assertRaises(IndexError):
165 with self.assertRaises(IndexError):
175 f.node(1)
166 f.node(1)
176
167
177 self.assertEqual(f.lookup(node), node)
168 self.assertEqual(f.lookup(node), node)
178 self.assertEqual(f.lookup(0), node)
169 self.assertEqual(f.lookup(0), node)
179 self.assertEqual(f.lookup(b'0'), node)
170 self.assertEqual(f.lookup(b'0'), node)
180 self.assertEqual(f.lookup(hex(node)), node)
171 self.assertEqual(f.lookup(hex(node)), node)
181
172
182 self.assertEqual(f.linkrev(0), 0)
173 self.assertEqual(f.linkrev(0), 0)
183
174
184 with self.assertRaises(IndexError):
175 with self.assertRaises(IndexError):
185 f.linkrev(1)
176 f.linkrev(1)
186
177
187 self.assertFalse(f.iscensored(0))
178 self.assertFalse(f.iscensored(0))
188
179
189 with self.assertRaises(IndexError):
180 with self.assertRaises(IndexError):
190 f.iscensored(1)
181 f.iscensored(1)
191
182
192 self.assertEqual(list(f.descendants([0])), [])
183 self.assertEqual(list(f.descendants([0])), [])
193
184
194 self.assertEqual(f.heads(), [node])
185 self.assertEqual(f.heads(), [node])
195 self.assertEqual(f.heads(node), [node])
186 self.assertEqual(f.heads(node), [node])
196 self.assertEqual(f.heads(stop=[node]), [node])
187 self.assertEqual(f.heads(stop=[node]), [node])
197
188
198 with self.assertRaises(error.LookupError):
189 with self.assertRaises(error.LookupError):
199 f.heads(stop=[b'\x01' * 20])
190 f.heads(stop=[b'\x01' * 20])
200
191
201 self.assertEqual(f.children(node), [])
192 self.assertEqual(f.children(node), [])
202
193
203 self.assertEqual(f.deltaparent(0), nullrev)
204
205 def testmultiplerevisions(self):
194 def testmultiplerevisions(self):
206 fulltext0 = b'x' * 1024
195 fulltext0 = b'x' * 1024
207 fulltext1 = fulltext0 + b'y'
196 fulltext1 = fulltext0 + b'y'
208 fulltext2 = b'y' + fulltext0 + b'z'
197 fulltext2 = b'y' + fulltext0 + b'z'
209
198
210 f = self._makefilefn()
199 f = self._makefilefn()
211 with self._maketransactionfn() as tr:
200 with self._maketransactionfn() as tr:
212 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
201 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
213 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
202 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
214 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
203 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
215
204
216 self.assertEqual(len(f), 3)
205 self.assertEqual(len(f), 3)
217 self.assertEqual(list(f), [0, 1, 2])
206 self.assertEqual(list(f), [0, 1, 2])
218
207
219 gen = iter(f)
208 gen = iter(f)
220 self.assertEqual(next(gen), 0)
209 self.assertEqual(next(gen), 0)
221 self.assertEqual(next(gen), 1)
210 self.assertEqual(next(gen), 1)
222 self.assertEqual(next(gen), 2)
211 self.assertEqual(next(gen), 2)
223
212
224 with self.assertRaises(StopIteration):
213 with self.assertRaises(StopIteration):
225 next(gen)
214 next(gen)
226
215
227 self.assertEqual(list(f.revs()), [0, 1, 2])
216 self.assertEqual(list(f.revs()), [0, 1, 2])
228 self.assertEqual(list(f.revs(0)), [0, 1, 2])
217 self.assertEqual(list(f.revs(0)), [0, 1, 2])
229 self.assertEqual(list(f.revs(1)), [1, 2])
218 self.assertEqual(list(f.revs(1)), [1, 2])
230 self.assertEqual(list(f.revs(2)), [2])
219 self.assertEqual(list(f.revs(2)), [2])
231 self.assertEqual(list(f.revs(3)), [])
220 self.assertEqual(list(f.revs(3)), [])
232 self.assertEqual(list(f.revs(stop=1)), [0, 1])
221 self.assertEqual(list(f.revs(stop=1)), [0, 1])
233 self.assertEqual(list(f.revs(stop=2)), [0, 1, 2])
222 self.assertEqual(list(f.revs(stop=2)), [0, 1, 2])
234 self.assertEqual(list(f.revs(stop=3)), [0, 1, 2])
223 self.assertEqual(list(f.revs(stop=3)), [0, 1, 2])
235 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
224 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
236 self.assertEqual(list(f.revs(2, 1)), [2, 1])
225 self.assertEqual(list(f.revs(2, 1)), [2, 1])
237 # TODO this is wrong
226 # TODO this is wrong
238 self.assertEqual(list(f.revs(3, 2)), [3, 2])
227 self.assertEqual(list(f.revs(3, 2)), [3, 2])
239
228
240 self.assertEqual(f.parents(node0), (nullid, nullid))
229 self.assertEqual(f.parents(node0), (nullid, nullid))
241 self.assertEqual(f.parents(node1), (node0, nullid))
230 self.assertEqual(f.parents(node1), (node0, nullid))
242 self.assertEqual(f.parents(node2), (node1, nullid))
231 self.assertEqual(f.parents(node2), (node1, nullid))
243
232
244 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
233 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
245 self.assertEqual(f.parentrevs(1), (0, nullrev))
234 self.assertEqual(f.parentrevs(1), (0, nullrev))
246 self.assertEqual(f.parentrevs(2), (1, nullrev))
235 self.assertEqual(f.parentrevs(2), (1, nullrev))
247
236
248 self.assertEqual(f.rev(node0), 0)
237 self.assertEqual(f.rev(node0), 0)
249 self.assertEqual(f.rev(node1), 1)
238 self.assertEqual(f.rev(node1), 1)
250 self.assertEqual(f.rev(node2), 2)
239 self.assertEqual(f.rev(node2), 2)
251
240
252 with self.assertRaises(error.LookupError):
241 with self.assertRaises(error.LookupError):
253 f.rev(b'\x01' * 20)
242 f.rev(b'\x01' * 20)
254
243
255 self.assertEqual(f.node(0), node0)
244 self.assertEqual(f.node(0), node0)
256 self.assertEqual(f.node(1), node1)
245 self.assertEqual(f.node(1), node1)
257 self.assertEqual(f.node(2), node2)
246 self.assertEqual(f.node(2), node2)
258
247
259 with self.assertRaises(IndexError):
248 with self.assertRaises(IndexError):
260 f.node(3)
249 f.node(3)
261
250
262 self.assertEqual(f.lookup(node0), node0)
251 self.assertEqual(f.lookup(node0), node0)
263 self.assertEqual(f.lookup(0), node0)
252 self.assertEqual(f.lookup(0), node0)
264 self.assertEqual(f.lookup(b'0'), node0)
253 self.assertEqual(f.lookup(b'0'), node0)
265 self.assertEqual(f.lookup(hex(node0)), node0)
254 self.assertEqual(f.lookup(hex(node0)), node0)
266
255
267 self.assertEqual(f.lookup(node1), node1)
256 self.assertEqual(f.lookup(node1), node1)
268 self.assertEqual(f.lookup(1), node1)
257 self.assertEqual(f.lookup(1), node1)
269 self.assertEqual(f.lookup(b'1'), node1)
258 self.assertEqual(f.lookup(b'1'), node1)
270 self.assertEqual(f.lookup(hex(node1)), node1)
259 self.assertEqual(f.lookup(hex(node1)), node1)
271
260
272 self.assertEqual(f.linkrev(0), 0)
261 self.assertEqual(f.linkrev(0), 0)
273 self.assertEqual(f.linkrev(1), 1)
262 self.assertEqual(f.linkrev(1), 1)
274 self.assertEqual(f.linkrev(2), 3)
263 self.assertEqual(f.linkrev(2), 3)
275
264
276 with self.assertRaises(IndexError):
265 with self.assertRaises(IndexError):
277 f.linkrev(3)
266 f.linkrev(3)
278
267
279 self.assertFalse(f.iscensored(0))
268 self.assertFalse(f.iscensored(0))
280 self.assertFalse(f.iscensored(1))
269 self.assertFalse(f.iscensored(1))
281 self.assertFalse(f.iscensored(2))
270 self.assertFalse(f.iscensored(2))
282
271
283 with self.assertRaises(IndexError):
272 with self.assertRaises(IndexError):
284 f.iscensored(3)
273 f.iscensored(3)
285
274
286 self.assertEqual(f.commonancestorsheads(node1, nullid), [])
275 self.assertEqual(f.commonancestorsheads(node1, nullid), [])
287 self.assertEqual(f.commonancestorsheads(node1, node0), [node0])
276 self.assertEqual(f.commonancestorsheads(node1, node0), [node0])
288 self.assertEqual(f.commonancestorsheads(node1, node1), [node1])
277 self.assertEqual(f.commonancestorsheads(node1, node1), [node1])
289 self.assertEqual(f.commonancestorsheads(node0, node1), [node0])
278 self.assertEqual(f.commonancestorsheads(node0, node1), [node0])
290 self.assertEqual(f.commonancestorsheads(node1, node2), [node1])
279 self.assertEqual(f.commonancestorsheads(node1, node2), [node1])
291 self.assertEqual(f.commonancestorsheads(node2, node1), [node1])
280 self.assertEqual(f.commonancestorsheads(node2, node1), [node1])
292
281
293 self.assertEqual(list(f.descendants([0])), [1, 2])
282 self.assertEqual(list(f.descendants([0])), [1, 2])
294 self.assertEqual(list(f.descendants([1])), [2])
283 self.assertEqual(list(f.descendants([1])), [2])
295 self.assertEqual(list(f.descendants([0, 1])), [1, 2])
284 self.assertEqual(list(f.descendants([0, 1])), [1, 2])
296
285
297 self.assertEqual(f.heads(), [node2])
286 self.assertEqual(f.heads(), [node2])
298 self.assertEqual(f.heads(node0), [node2])
287 self.assertEqual(f.heads(node0), [node2])
299 self.assertEqual(f.heads(node1), [node2])
288 self.assertEqual(f.heads(node1), [node2])
300 self.assertEqual(f.heads(node2), [node2])
289 self.assertEqual(f.heads(node2), [node2])
301
290
302 # TODO this behavior seems wonky. Is it correct? If so, the
291 # TODO this behavior seems wonky. Is it correct? If so, the
303 # docstring for heads() should be updated to reflect desired
292 # docstring for heads() should be updated to reflect desired
304 # behavior.
293 # behavior.
305 self.assertEqual(f.heads(stop=[node1]), [node1, node2])
294 self.assertEqual(f.heads(stop=[node1]), [node1, node2])
306 self.assertEqual(f.heads(stop=[node0]), [node0, node2])
295 self.assertEqual(f.heads(stop=[node0]), [node0, node2])
307 self.assertEqual(f.heads(stop=[node1, node2]), [node1, node2])
296 self.assertEqual(f.heads(stop=[node1, node2]), [node1, node2])
308
297
309 with self.assertRaises(error.LookupError):
298 with self.assertRaises(error.LookupError):
310 f.heads(stop=[b'\x01' * 20])
299 f.heads(stop=[b'\x01' * 20])
311
300
312 self.assertEqual(f.children(node0), [node1])
301 self.assertEqual(f.children(node0), [node1])
313 self.assertEqual(f.children(node1), [node2])
302 self.assertEqual(f.children(node1), [node2])
314 self.assertEqual(f.children(node2), [])
303 self.assertEqual(f.children(node2), [])
315
304
316 self.assertEqual(f.deltaparent(0), nullrev)
317 self.assertEqual(f.deltaparent(1), 0)
318 self.assertEqual(f.deltaparent(2), 1)
319
320 def testmultipleheads(self):
305 def testmultipleheads(self):
321 f = self._makefilefn()
306 f = self._makefilefn()
322
307
323 with self._maketransactionfn() as tr:
308 with self._maketransactionfn() as tr:
324 node0 = f.add(b'0', None, tr, 0, nullid, nullid)
309 node0 = f.add(b'0', None, tr, 0, nullid, nullid)
325 node1 = f.add(b'1', None, tr, 1, node0, nullid)
310 node1 = f.add(b'1', None, tr, 1, node0, nullid)
326 node2 = f.add(b'2', None, tr, 2, node1, nullid)
311 node2 = f.add(b'2', None, tr, 2, node1, nullid)
327 node3 = f.add(b'3', None, tr, 3, node0, nullid)
312 node3 = f.add(b'3', None, tr, 3, node0, nullid)
328 node4 = f.add(b'4', None, tr, 4, node3, nullid)
313 node4 = f.add(b'4', None, tr, 4, node3, nullid)
329 node5 = f.add(b'5', None, tr, 5, node0, nullid)
314 node5 = f.add(b'5', None, tr, 5, node0, nullid)
330
315
331 self.assertEqual(len(f), 6)
316 self.assertEqual(len(f), 6)
332
317
333 self.assertEqual(list(f.descendants([0])), [1, 2, 3, 4, 5])
318 self.assertEqual(list(f.descendants([0])), [1, 2, 3, 4, 5])
334 self.assertEqual(list(f.descendants([1])), [2])
319 self.assertEqual(list(f.descendants([1])), [2])
335 self.assertEqual(list(f.descendants([2])), [])
320 self.assertEqual(list(f.descendants([2])), [])
336 self.assertEqual(list(f.descendants([3])), [4])
321 self.assertEqual(list(f.descendants([3])), [4])
337 self.assertEqual(list(f.descendants([0, 1])), [1, 2, 3, 4, 5])
322 self.assertEqual(list(f.descendants([0, 1])), [1, 2, 3, 4, 5])
338 self.assertEqual(list(f.descendants([1, 3])), [2, 4])
323 self.assertEqual(list(f.descendants([1, 3])), [2, 4])
339
324
340 self.assertEqual(f.heads(), [node2, node4, node5])
325 self.assertEqual(f.heads(), [node2, node4, node5])
341 self.assertEqual(f.heads(node0), [node2, node4, node5])
326 self.assertEqual(f.heads(node0), [node2, node4, node5])
342 self.assertEqual(f.heads(node1), [node2])
327 self.assertEqual(f.heads(node1), [node2])
343 self.assertEqual(f.heads(node2), [node2])
328 self.assertEqual(f.heads(node2), [node2])
344 self.assertEqual(f.heads(node3), [node4])
329 self.assertEqual(f.heads(node3), [node4])
345 self.assertEqual(f.heads(node4), [node4])
330 self.assertEqual(f.heads(node4), [node4])
346 self.assertEqual(f.heads(node5), [node5])
331 self.assertEqual(f.heads(node5), [node5])
347
332
348 # TODO this seems wrong.
333 # TODO this seems wrong.
349 self.assertEqual(f.heads(stop=[node0]), [node0, node2, node4, node5])
334 self.assertEqual(f.heads(stop=[node0]), [node0, node2, node4, node5])
350 self.assertEqual(f.heads(stop=[node1]), [node1, node2, node4, node5])
335 self.assertEqual(f.heads(stop=[node1]), [node1, node2, node4, node5])
351
336
352 self.assertEqual(f.children(node0), [node1, node3, node5])
337 self.assertEqual(f.children(node0), [node1, node3, node5])
353 self.assertEqual(f.children(node1), [node2])
338 self.assertEqual(f.children(node1), [node2])
354 self.assertEqual(f.children(node2), [])
339 self.assertEqual(f.children(node2), [])
355 self.assertEqual(f.children(node3), [node4])
340 self.assertEqual(f.children(node3), [node4])
356 self.assertEqual(f.children(node4), [])
341 self.assertEqual(f.children(node4), [])
357 self.assertEqual(f.children(node5), [])
342 self.assertEqual(f.children(node5), [])
358
343
359 class ifiledatatests(basetestcase):
344 class ifiledatatests(basetestcase):
360 """Generic tests for the ifiledata interface.
345 """Generic tests for the ifiledata interface.
361
346
362 All file storage backends for data should conform to the tests in this
347 All file storage backends for data should conform to the tests in this
363 class.
348 class.
364
349
365 Use ``makeifiledatatests()`` to create an instance of this type.
350 Use ``makeifiledatatests()`` to create an instance of this type.
366 """
351 """
367 def testempty(self):
352 def testempty(self):
368 f = self._makefilefn()
353 f = self._makefilefn()
369
354
370 self.assertEqual(f.storageinfo(), {})
355 self.assertEqual(f.storageinfo(), {})
371 self.assertEqual(f.storageinfo(revisionscount=True, trackedsize=True),
356 self.assertEqual(f.storageinfo(revisionscount=True, trackedsize=True),
372 {'revisionscount': 0, 'trackedsize': 0})
357 {'revisionscount': 0, 'trackedsize': 0})
373
358
374 self.assertEqual(f.size(nullrev), 0)
359 self.assertEqual(f.size(nullrev), 0)
375
360
376 for i in range(-5, 5):
361 for i in range(-5, 5):
377 if i == nullrev:
362 if i == nullrev:
378 continue
363 continue
379
364
380 with self.assertRaises(IndexError):
365 with self.assertRaises(IndexError):
381 f.size(i)
366 f.size(i)
382
367
383 with self.assertRaises(error.StorageError):
368 with self.assertRaises(error.StorageError):
384 f.checkhash(b'', nullid)
369 f.checkhash(b'', nullid)
385
370
386 with self.assertRaises(error.LookupError):
371 with self.assertRaises(error.LookupError):
387 f.checkhash(b'', b'\x01' * 20)
372 f.checkhash(b'', b'\x01' * 20)
388
373
389 self.assertEqual(f.revision(nullid), b'')
374 self.assertEqual(f.revision(nullid), b'')
390 self.assertEqual(f.revision(nullid, raw=True), b'')
375 self.assertEqual(f.revision(nullid, raw=True), b'')
391
376
392 with self.assertRaises(error.LookupError):
377 with self.assertRaises(error.LookupError):
393 f.revision(b'\x01' * 20)
378 f.revision(b'\x01' * 20)
394
379
395 self.assertEqual(f.read(nullid), b'')
380 self.assertEqual(f.read(nullid), b'')
396
381
397 with self.assertRaises(error.LookupError):
382 with self.assertRaises(error.LookupError):
398 f.read(b'\x01' * 20)
383 f.read(b'\x01' * 20)
399
384
400 self.assertFalse(f.renamed(nullid))
385 self.assertFalse(f.renamed(nullid))
401
386
402 with self.assertRaises(error.LookupError):
387 with self.assertRaises(error.LookupError):
403 f.read(b'\x01' * 20)
388 f.read(b'\x01' * 20)
404
389
405 self.assertTrue(f.cmp(nullid, b''))
390 self.assertTrue(f.cmp(nullid, b''))
406 self.assertTrue(f.cmp(nullid, b'foo'))
391 self.assertTrue(f.cmp(nullid, b'foo'))
407
392
408 with self.assertRaises(error.LookupError):
393 with self.assertRaises(error.LookupError):
409 f.cmp(b'\x01' * 20, b'irrelevant')
394 f.cmp(b'\x01' * 20, b'irrelevant')
410
395
411 self.assertEqual(f.revdiff(nullrev, nullrev), b'')
396 self.assertEqual(f.revdiff(nullrev, nullrev), b'')
412
397
413 with self.assertRaises(IndexError):
398 with self.assertRaises(IndexError):
414 f.revdiff(0, nullrev)
399 f.revdiff(0, nullrev)
415
400
416 with self.assertRaises(IndexError):
401 with self.assertRaises(IndexError):
417 f.revdiff(nullrev, 0)
402 f.revdiff(nullrev, 0)
418
403
419 with self.assertRaises(IndexError):
404 with self.assertRaises(IndexError):
420 f.revdiff(0, 0)
405 f.revdiff(0, 0)
421
406
422 # Emitting empty list is an empty generator.
407 # Emitting empty list is an empty generator.
423 gen = f.emitrevisions([])
408 gen = f.emitrevisions([])
424 with self.assertRaises(StopIteration):
409 with self.assertRaises(StopIteration):
425 next(gen)
410 next(gen)
426
411
427 # Emitting null node yields nothing.
412 # Emitting null node yields nothing.
428 gen = f.emitrevisions([nullid])
413 gen = f.emitrevisions([nullid])
429 with self.assertRaises(StopIteration):
414 with self.assertRaises(StopIteration):
430 next(gen)
415 next(gen)
431
416
432 # Requesting unknown node fails.
417 # Requesting unknown node fails.
433 with self.assertRaises(error.LookupError):
418 with self.assertRaises(error.LookupError):
434 list(f.emitrevisions([b'\x01' * 20]))
419 list(f.emitrevisions([b'\x01' * 20]))
435
420
436 def testsinglerevision(self):
421 def testsinglerevision(self):
437 fulltext = b'initial'
422 fulltext = b'initial'
438
423
439 f = self._makefilefn()
424 f = self._makefilefn()
440 with self._maketransactionfn() as tr:
425 with self._maketransactionfn() as tr:
441 node = f.add(fulltext, None, tr, 0, nullid, nullid)
426 node = f.add(fulltext, None, tr, 0, nullid, nullid)
442
427
443 self.assertEqual(f.storageinfo(), {})
428 self.assertEqual(f.storageinfo(), {})
444 self.assertEqual(f.storageinfo(revisionscount=True, trackedsize=True),
429 self.assertEqual(f.storageinfo(revisionscount=True, trackedsize=True),
445 {'revisionscount': 1, 'trackedsize': len(fulltext)})
430 {'revisionscount': 1, 'trackedsize': len(fulltext)})
446
431
447 self.assertEqual(f.size(0), len(fulltext))
432 self.assertEqual(f.size(0), len(fulltext))
448
433
449 with self.assertRaises(IndexError):
434 with self.assertRaises(IndexError):
450 f.size(1)
435 f.size(1)
451
436
452 f.checkhash(fulltext, node)
437 f.checkhash(fulltext, node)
453 f.checkhash(fulltext, node, nullid, nullid)
438 f.checkhash(fulltext, node, nullid, nullid)
454
439
455 with self.assertRaises(error.StorageError):
440 with self.assertRaises(error.StorageError):
456 f.checkhash(fulltext + b'extra', node)
441 f.checkhash(fulltext + b'extra', node)
457
442
458 with self.assertRaises(error.StorageError):
443 with self.assertRaises(error.StorageError):
459 f.checkhash(fulltext, node, b'\x01' * 20, nullid)
444 f.checkhash(fulltext, node, b'\x01' * 20, nullid)
460
445
461 with self.assertRaises(error.StorageError):
446 with self.assertRaises(error.StorageError):
462 f.checkhash(fulltext, node, nullid, b'\x01' * 20)
447 f.checkhash(fulltext, node, nullid, b'\x01' * 20)
463
448
464 self.assertEqual(f.revision(node), fulltext)
449 self.assertEqual(f.revision(node), fulltext)
465 self.assertEqual(f.revision(node, raw=True), fulltext)
450 self.assertEqual(f.revision(node, raw=True), fulltext)
466
451
467 self.assertEqual(f.read(node), fulltext)
452 self.assertEqual(f.read(node), fulltext)
468
453
469 self.assertFalse(f.renamed(node))
454 self.assertFalse(f.renamed(node))
470
455
471 self.assertFalse(f.cmp(node, fulltext))
456 self.assertFalse(f.cmp(node, fulltext))
472 self.assertTrue(f.cmp(node, fulltext + b'extra'))
457 self.assertTrue(f.cmp(node, fulltext + b'extra'))
473
458
474 self.assertEqual(f.revdiff(0, 0), b'')
459 self.assertEqual(f.revdiff(0, 0), b'')
475 self.assertEqual(f.revdiff(nullrev, 0),
460 self.assertEqual(f.revdiff(nullrev, 0),
476 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07%s' %
461 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07%s' %
477 fulltext)
462 fulltext)
478
463
479 self.assertEqual(f.revdiff(0, nullrev),
464 self.assertEqual(f.revdiff(0, nullrev),
480 b'\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00')
465 b'\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00')
481
466
482 # Emitting a single revision works.
467 # Emitting a single revision works.
483 gen = f.emitrevisions([node])
468 gen = f.emitrevisions([node])
484 rev = next(gen)
469 rev = next(gen)
485
470
486 self.assertEqual(rev.node, node)
471 self.assertEqual(rev.node, node)
487 self.assertEqual(rev.p1node, nullid)
472 self.assertEqual(rev.p1node, nullid)
488 self.assertEqual(rev.p2node, nullid)
473 self.assertEqual(rev.p2node, nullid)
489 self.assertIsNone(rev.linknode)
474 self.assertIsNone(rev.linknode)
490 self.assertEqual(rev.basenode, nullid)
475 self.assertEqual(rev.basenode, nullid)
491 self.assertIsNone(rev.baserevisionsize)
476 self.assertIsNone(rev.baserevisionsize)
492 self.assertIsNone(rev.revision)
477 self.assertIsNone(rev.revision)
493 self.assertIsNone(rev.delta)
478 self.assertIsNone(rev.delta)
494
479
495 with self.assertRaises(StopIteration):
480 with self.assertRaises(StopIteration):
496 next(gen)
481 next(gen)
497
482
498 # Requesting revision data works.
483 # Requesting revision data works.
499 gen = f.emitrevisions([node], revisiondata=True)
484 gen = f.emitrevisions([node], revisiondata=True)
500 rev = next(gen)
485 rev = next(gen)
501
486
502 self.assertEqual(rev.node, node)
487 self.assertEqual(rev.node, node)
503 self.assertEqual(rev.p1node, nullid)
488 self.assertEqual(rev.p1node, nullid)
504 self.assertEqual(rev.p2node, nullid)
489 self.assertEqual(rev.p2node, nullid)
505 self.assertIsNone(rev.linknode)
490 self.assertIsNone(rev.linknode)
506 self.assertEqual(rev.basenode, nullid)
491 self.assertEqual(rev.basenode, nullid)
507 self.assertIsNone(rev.baserevisionsize)
492 self.assertIsNone(rev.baserevisionsize)
508 self.assertEqual(rev.revision, fulltext)
493 self.assertEqual(rev.revision, fulltext)
509 self.assertIsNone(rev.delta)
494 self.assertIsNone(rev.delta)
510
495
511 with self.assertRaises(StopIteration):
496 with self.assertRaises(StopIteration):
512 next(gen)
497 next(gen)
513
498
514 # Emitting an unknown node after a known revision results in error.
499 # Emitting an unknown node after a known revision results in error.
515 with self.assertRaises(error.LookupError):
500 with self.assertRaises(error.LookupError):
516 list(f.emitrevisions([node, b'\x01' * 20]))
501 list(f.emitrevisions([node, b'\x01' * 20]))
517
502
518 def testmultiplerevisions(self):
503 def testmultiplerevisions(self):
519 fulltext0 = b'x' * 1024
504 fulltext0 = b'x' * 1024
520 fulltext1 = fulltext0 + b'y'
505 fulltext1 = fulltext0 + b'y'
521 fulltext2 = b'y' + fulltext0 + b'z'
506 fulltext2 = b'y' + fulltext0 + b'z'
522
507
523 f = self._makefilefn()
508 f = self._makefilefn()
524 with self._maketransactionfn() as tr:
509 with self._maketransactionfn() as tr:
525 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
510 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
526 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
511 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
527 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
512 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
528
513
529 self.assertEqual(f.storageinfo(), {})
514 self.assertEqual(f.storageinfo(), {})
530 self.assertEqual(
515 self.assertEqual(
531 f.storageinfo(revisionscount=True, trackedsize=True),
516 f.storageinfo(revisionscount=True, trackedsize=True),
532 {
517 {
533 'revisionscount': 3,
518 'revisionscount': 3,
534 'trackedsize': len(fulltext0) + len(fulltext1) + len(fulltext2),
519 'trackedsize': len(fulltext0) + len(fulltext1) + len(fulltext2),
535 })
520 })
536
521
537 self.assertEqual(f.size(0), len(fulltext0))
522 self.assertEqual(f.size(0), len(fulltext0))
538 self.assertEqual(f.size(1), len(fulltext1))
523 self.assertEqual(f.size(1), len(fulltext1))
539 self.assertEqual(f.size(2), len(fulltext2))
524 self.assertEqual(f.size(2), len(fulltext2))
540
525
541 with self.assertRaises(IndexError):
526 with self.assertRaises(IndexError):
542 f.size(3)
527 f.size(3)
543
528
544 f.checkhash(fulltext0, node0)
529 f.checkhash(fulltext0, node0)
545 f.checkhash(fulltext1, node1)
530 f.checkhash(fulltext1, node1)
546 f.checkhash(fulltext1, node1, node0, nullid)
531 f.checkhash(fulltext1, node1, node0, nullid)
547 f.checkhash(fulltext2, node2, node1, nullid)
532 f.checkhash(fulltext2, node2, node1, nullid)
548
533
549 with self.assertRaises(error.StorageError):
534 with self.assertRaises(error.StorageError):
550 f.checkhash(fulltext1, b'\x01' * 20)
535 f.checkhash(fulltext1, b'\x01' * 20)
551
536
552 with self.assertRaises(error.StorageError):
537 with self.assertRaises(error.StorageError):
553 f.checkhash(fulltext1 + b'extra', node1, node0, nullid)
538 f.checkhash(fulltext1 + b'extra', node1, node0, nullid)
554
539
555 with self.assertRaises(error.StorageError):
540 with self.assertRaises(error.StorageError):
556 f.checkhash(fulltext1, node1, node0, node0)
541 f.checkhash(fulltext1, node1, node0, node0)
557
542
558 self.assertEqual(f.revision(node0), fulltext0)
543 self.assertEqual(f.revision(node0), fulltext0)
559 self.assertEqual(f.revision(node0, raw=True), fulltext0)
544 self.assertEqual(f.revision(node0, raw=True), fulltext0)
560 self.assertEqual(f.revision(node1), fulltext1)
545 self.assertEqual(f.revision(node1), fulltext1)
561 self.assertEqual(f.revision(node1, raw=True), fulltext1)
546 self.assertEqual(f.revision(node1, raw=True), fulltext1)
562 self.assertEqual(f.revision(node2), fulltext2)
547 self.assertEqual(f.revision(node2), fulltext2)
563 self.assertEqual(f.revision(node2, raw=True), fulltext2)
548 self.assertEqual(f.revision(node2, raw=True), fulltext2)
564
549
565 with self.assertRaises(error.LookupError):
550 with self.assertRaises(error.LookupError):
566 f.revision(b'\x01' * 20)
551 f.revision(b'\x01' * 20)
567
552
568 self.assertEqual(f.read(node0), fulltext0)
553 self.assertEqual(f.read(node0), fulltext0)
569 self.assertEqual(f.read(node1), fulltext1)
554 self.assertEqual(f.read(node1), fulltext1)
570 self.assertEqual(f.read(node2), fulltext2)
555 self.assertEqual(f.read(node2), fulltext2)
571
556
572 with self.assertRaises(error.LookupError):
557 with self.assertRaises(error.LookupError):
573 f.read(b'\x01' * 20)
558 f.read(b'\x01' * 20)
574
559
575 self.assertFalse(f.renamed(node0))
560 self.assertFalse(f.renamed(node0))
576 self.assertFalse(f.renamed(node1))
561 self.assertFalse(f.renamed(node1))
577 self.assertFalse(f.renamed(node2))
562 self.assertFalse(f.renamed(node2))
578
563
579 with self.assertRaises(error.LookupError):
564 with self.assertRaises(error.LookupError):
580 f.renamed(b'\x01' * 20)
565 f.renamed(b'\x01' * 20)
581
566
582 self.assertFalse(f.cmp(node0, fulltext0))
567 self.assertFalse(f.cmp(node0, fulltext0))
583 self.assertFalse(f.cmp(node1, fulltext1))
568 self.assertFalse(f.cmp(node1, fulltext1))
584 self.assertFalse(f.cmp(node2, fulltext2))
569 self.assertFalse(f.cmp(node2, fulltext2))
585
570
586 self.assertTrue(f.cmp(node1, fulltext0))
571 self.assertTrue(f.cmp(node1, fulltext0))
587 self.assertTrue(f.cmp(node2, fulltext1))
572 self.assertTrue(f.cmp(node2, fulltext1))
588
573
589 with self.assertRaises(error.LookupError):
574 with self.assertRaises(error.LookupError):
590 f.cmp(b'\x01' * 20, b'irrelevant')
575 f.cmp(b'\x01' * 20, b'irrelevant')
591
576
592 self.assertEqual(f.revdiff(0, 1),
577 self.assertEqual(f.revdiff(0, 1),
593 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
578 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
594 fulltext1)
579 fulltext1)
595
580
596 self.assertEqual(f.revdiff(0, 2),
581 self.assertEqual(f.revdiff(0, 2),
597 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x02' +
582 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x02' +
598 fulltext2)
583 fulltext2)
599
584
600 # Nodes should be emitted in order.
585 # Nodes should be emitted in order.
601 gen = f.emitrevisions([node0, node1, node2], revisiondata=True)
586 gen = f.emitrevisions([node0, node1, node2], revisiondata=True)
602
587
603 rev = next(gen)
588 rev = next(gen)
604
589
605 self.assertEqual(rev.node, node0)
590 self.assertEqual(rev.node, node0)
606 self.assertEqual(rev.p1node, nullid)
591 self.assertEqual(rev.p1node, nullid)
607 self.assertEqual(rev.p2node, nullid)
592 self.assertEqual(rev.p2node, nullid)
608 self.assertIsNone(rev.linknode)
593 self.assertIsNone(rev.linknode)
609 self.assertEqual(rev.basenode, nullid)
594 self.assertEqual(rev.basenode, nullid)
610 self.assertIsNone(rev.baserevisionsize)
595 self.assertIsNone(rev.baserevisionsize)
611 self.assertEqual(rev.revision, fulltext0)
596 self.assertEqual(rev.revision, fulltext0)
612 self.assertIsNone(rev.delta)
597 self.assertIsNone(rev.delta)
613
598
614 rev = next(gen)
599 rev = next(gen)
615
600
616 self.assertEqual(rev.node, node1)
601 self.assertEqual(rev.node, node1)
617 self.assertEqual(rev.p1node, node0)
602 self.assertEqual(rev.p1node, node0)
618 self.assertEqual(rev.p2node, nullid)
603 self.assertEqual(rev.p2node, nullid)
619 self.assertIsNone(rev.linknode)
604 self.assertIsNone(rev.linknode)
620 self.assertEqual(rev.basenode, node0)
605 self.assertEqual(rev.basenode, node0)
621 self.assertIsNone(rev.baserevisionsize)
606 self.assertIsNone(rev.baserevisionsize)
622 self.assertIsNone(rev.revision)
607 self.assertIsNone(rev.revision)
623 self.assertEqual(rev.delta,
608 self.assertEqual(rev.delta,
624 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
609 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
625 fulltext1)
610 fulltext1)
626
611
627 rev = next(gen)
612 rev = next(gen)
628
613
629 self.assertEqual(rev.node, node2)
614 self.assertEqual(rev.node, node2)
630 self.assertEqual(rev.p1node, node1)
615 self.assertEqual(rev.p1node, node1)
631 self.assertEqual(rev.p2node, nullid)
616 self.assertEqual(rev.p2node, nullid)
632 self.assertIsNone(rev.linknode)
617 self.assertIsNone(rev.linknode)
633 self.assertEqual(rev.basenode, node1)
618 self.assertEqual(rev.basenode, node1)
634 self.assertIsNone(rev.baserevisionsize)
619 self.assertIsNone(rev.baserevisionsize)
635 self.assertIsNone(rev.revision)
620 self.assertIsNone(rev.revision)
636 self.assertEqual(rev.delta,
621 self.assertEqual(rev.delta,
637 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
622 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
638 fulltext2)
623 fulltext2)
639
624
640 with self.assertRaises(StopIteration):
625 with self.assertRaises(StopIteration):
641 next(gen)
626 next(gen)
642
627
643 # Request not in DAG order is reordered to be in DAG order.
628 # Request not in DAG order is reordered to be in DAG order.
644 gen = f.emitrevisions([node2, node1, node0], revisiondata=True)
629 gen = f.emitrevisions([node2, node1, node0], revisiondata=True)
645
630
646 rev = next(gen)
631 rev = next(gen)
647
632
648 self.assertEqual(rev.node, node0)
633 self.assertEqual(rev.node, node0)
649 self.assertEqual(rev.p1node, nullid)
634 self.assertEqual(rev.p1node, nullid)
650 self.assertEqual(rev.p2node, nullid)
635 self.assertEqual(rev.p2node, nullid)
651 self.assertIsNone(rev.linknode)
636 self.assertIsNone(rev.linknode)
652 self.assertEqual(rev.basenode, nullid)
637 self.assertEqual(rev.basenode, nullid)
653 self.assertIsNone(rev.baserevisionsize)
638 self.assertIsNone(rev.baserevisionsize)
654 self.assertEqual(rev.revision, fulltext0)
639 self.assertEqual(rev.revision, fulltext0)
655 self.assertIsNone(rev.delta)
640 self.assertIsNone(rev.delta)
656
641
657 rev = next(gen)
642 rev = next(gen)
658
643
659 self.assertEqual(rev.node, node1)
644 self.assertEqual(rev.node, node1)
660 self.assertEqual(rev.p1node, node0)
645 self.assertEqual(rev.p1node, node0)
661 self.assertEqual(rev.p2node, nullid)
646 self.assertEqual(rev.p2node, nullid)
662 self.assertIsNone(rev.linknode)
647 self.assertIsNone(rev.linknode)
663 self.assertEqual(rev.basenode, node0)
648 self.assertEqual(rev.basenode, node0)
664 self.assertIsNone(rev.baserevisionsize)
649 self.assertIsNone(rev.baserevisionsize)
665 self.assertIsNone(rev.revision)
650 self.assertIsNone(rev.revision)
666 self.assertEqual(rev.delta,
651 self.assertEqual(rev.delta,
667 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
652 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
668 fulltext1)
653 fulltext1)
669
654
670 rev = next(gen)
655 rev = next(gen)
671
656
672 self.assertEqual(rev.node, node2)
657 self.assertEqual(rev.node, node2)
673 self.assertEqual(rev.p1node, node1)
658 self.assertEqual(rev.p1node, node1)
674 self.assertEqual(rev.p2node, nullid)
659 self.assertEqual(rev.p2node, nullid)
675 self.assertIsNone(rev.linknode)
660 self.assertIsNone(rev.linknode)
676 self.assertEqual(rev.basenode, node1)
661 self.assertEqual(rev.basenode, node1)
677 self.assertIsNone(rev.baserevisionsize)
662 self.assertIsNone(rev.baserevisionsize)
678 self.assertIsNone(rev.revision)
663 self.assertIsNone(rev.revision)
679 self.assertEqual(rev.delta,
664 self.assertEqual(rev.delta,
680 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
665 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
681 fulltext2)
666 fulltext2)
682
667
683 with self.assertRaises(StopIteration):
668 with self.assertRaises(StopIteration):
684 next(gen)
669 next(gen)
685
670
686 # Unrecognized nodesorder value raises ProgrammingError.
671 # Unrecognized nodesorder value raises ProgrammingError.
687 with self.assertRaises(error.ProgrammingError):
672 with self.assertRaises(error.ProgrammingError):
688 list(f.emitrevisions([], nodesorder='bad'))
673 list(f.emitrevisions([], nodesorder='bad'))
689
674
690 # nodesorder=storage is recognized. But we can't test it thoroughly
675 # nodesorder=storage is recognized. But we can't test it thoroughly
691 # because behavior is storage-dependent.
676 # because behavior is storage-dependent.
692 res = list(f.emitrevisions([node2, node1, node0],
677 res = list(f.emitrevisions([node2, node1, node0],
693 nodesorder='storage'))
678 nodesorder='storage'))
694 self.assertEqual(len(res), 3)
679 self.assertEqual(len(res), 3)
695 self.assertEqual({o.node for o in res}, {node0, node1, node2})
680 self.assertEqual({o.node for o in res}, {node0, node1, node2})
696
681
697 # nodesorder=nodes forces the order.
682 # nodesorder=nodes forces the order.
698 gen = f.emitrevisions([node2, node0], nodesorder='nodes',
683 gen = f.emitrevisions([node2, node0], nodesorder='nodes',
699 revisiondata=True)
684 revisiondata=True)
700
685
701 rev = next(gen)
686 rev = next(gen)
702 self.assertEqual(rev.node, node2)
687 self.assertEqual(rev.node, node2)
703 self.assertEqual(rev.p1node, node1)
688 self.assertEqual(rev.p1node, node1)
704 self.assertEqual(rev.p2node, nullid)
689 self.assertEqual(rev.p2node, nullid)
705 self.assertEqual(rev.basenode, nullid)
690 self.assertEqual(rev.basenode, nullid)
706 self.assertIsNone(rev.baserevisionsize)
691 self.assertIsNone(rev.baserevisionsize)
707 self.assertEqual(rev.revision, fulltext2)
692 self.assertEqual(rev.revision, fulltext2)
708 self.assertIsNone(rev.delta)
693 self.assertIsNone(rev.delta)
709
694
710 rev = next(gen)
695 rev = next(gen)
711 self.assertEqual(rev.node, node0)
696 self.assertEqual(rev.node, node0)
712 self.assertEqual(rev.p1node, nullid)
697 self.assertEqual(rev.p1node, nullid)
713 self.assertEqual(rev.p2node, nullid)
698 self.assertEqual(rev.p2node, nullid)
714 # Delta behavior is storage dependent, so we can't easily test it.
699 # Delta behavior is storage dependent, so we can't easily test it.
715
700
716 with self.assertRaises(StopIteration):
701 with self.assertRaises(StopIteration):
717 next(gen)
702 next(gen)
718
703
719 # assumehaveparentrevisions=False (the default) won't send a delta for
704 # assumehaveparentrevisions=False (the default) won't send a delta for
720 # the first revision.
705 # the first revision.
721 gen = f.emitrevisions({node2, node1}, revisiondata=True)
706 gen = f.emitrevisions({node2, node1}, revisiondata=True)
722
707
723 rev = next(gen)
708 rev = next(gen)
724 self.assertEqual(rev.node, node1)
709 self.assertEqual(rev.node, node1)
725 self.assertEqual(rev.p1node, node0)
710 self.assertEqual(rev.p1node, node0)
726 self.assertEqual(rev.p2node, nullid)
711 self.assertEqual(rev.p2node, nullid)
727 self.assertEqual(rev.basenode, nullid)
712 self.assertEqual(rev.basenode, nullid)
728 self.assertIsNone(rev.baserevisionsize)
713 self.assertIsNone(rev.baserevisionsize)
729 self.assertEqual(rev.revision, fulltext1)
714 self.assertEqual(rev.revision, fulltext1)
730 self.assertIsNone(rev.delta)
715 self.assertIsNone(rev.delta)
731
716
732 rev = next(gen)
717 rev = next(gen)
733 self.assertEqual(rev.node, node2)
718 self.assertEqual(rev.node, node2)
734 self.assertEqual(rev.p1node, node1)
719 self.assertEqual(rev.p1node, node1)
735 self.assertEqual(rev.p2node, nullid)
720 self.assertEqual(rev.p2node, nullid)
736 self.assertEqual(rev.basenode, node1)
721 self.assertEqual(rev.basenode, node1)
737 self.assertIsNone(rev.baserevisionsize)
722 self.assertIsNone(rev.baserevisionsize)
738 self.assertIsNone(rev.revision)
723 self.assertIsNone(rev.revision)
739 self.assertEqual(rev.delta,
724 self.assertEqual(rev.delta,
740 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
725 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
741 fulltext2)
726 fulltext2)
742
727
743 with self.assertRaises(StopIteration):
728 with self.assertRaises(StopIteration):
744 next(gen)
729 next(gen)
745
730
746 # assumehaveparentrevisions=True allows delta against initial revision.
731 # assumehaveparentrevisions=True allows delta against initial revision.
747 gen = f.emitrevisions([node2, node1],
732 gen = f.emitrevisions([node2, node1],
748 revisiondata=True, assumehaveparentrevisions=True)
733 revisiondata=True, assumehaveparentrevisions=True)
749
734
750 rev = next(gen)
735 rev = next(gen)
751 self.assertEqual(rev.node, node1)
736 self.assertEqual(rev.node, node1)
752 self.assertEqual(rev.p1node, node0)
737 self.assertEqual(rev.p1node, node0)
753 self.assertEqual(rev.p2node, nullid)
738 self.assertEqual(rev.p2node, nullid)
754 self.assertEqual(rev.basenode, node0)
739 self.assertEqual(rev.basenode, node0)
755 self.assertIsNone(rev.baserevisionsize)
740 self.assertIsNone(rev.baserevisionsize)
756 self.assertIsNone(rev.revision)
741 self.assertIsNone(rev.revision)
757 self.assertEqual(rev.delta,
742 self.assertEqual(rev.delta,
758 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
743 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
759 fulltext1)
744 fulltext1)
760
745
761 # forceprevious=True forces a delta against the previous revision.
746 # forceprevious=True forces a delta against the previous revision.
762 # Special case for initial revision.
747 # Special case for initial revision.
763 gen = f.emitrevisions([node0], revisiondata=True, deltaprevious=True)
748 gen = f.emitrevisions([node0], revisiondata=True, deltaprevious=True)
764
749
765 rev = next(gen)
750 rev = next(gen)
766 self.assertEqual(rev.node, node0)
751 self.assertEqual(rev.node, node0)
767 self.assertEqual(rev.p1node, nullid)
752 self.assertEqual(rev.p1node, nullid)
768 self.assertEqual(rev.p2node, nullid)
753 self.assertEqual(rev.p2node, nullid)
769 self.assertEqual(rev.basenode, nullid)
754 self.assertEqual(rev.basenode, nullid)
770 self.assertIsNone(rev.baserevisionsize)
755 self.assertIsNone(rev.baserevisionsize)
771 self.assertIsNone(rev.revision)
756 self.assertIsNone(rev.revision)
772 self.assertEqual(rev.delta,
757 self.assertEqual(rev.delta,
773 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' +
758 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' +
774 fulltext0)
759 fulltext0)
775
760
776 with self.assertRaises(StopIteration):
761 with self.assertRaises(StopIteration):
777 next(gen)
762 next(gen)
778
763
779 gen = f.emitrevisions([node0, node2], revisiondata=True,
764 gen = f.emitrevisions([node0, node2], revisiondata=True,
780 deltaprevious=True)
765 deltaprevious=True)
781
766
782 rev = next(gen)
767 rev = next(gen)
783 self.assertEqual(rev.node, node0)
768 self.assertEqual(rev.node, node0)
784 self.assertEqual(rev.p1node, nullid)
769 self.assertEqual(rev.p1node, nullid)
785 self.assertEqual(rev.p2node, nullid)
770 self.assertEqual(rev.p2node, nullid)
786 self.assertEqual(rev.basenode, nullid)
771 self.assertEqual(rev.basenode, nullid)
787 self.assertIsNone(rev.baserevisionsize)
772 self.assertIsNone(rev.baserevisionsize)
788 self.assertIsNone(rev.revision)
773 self.assertIsNone(rev.revision)
789 self.assertEqual(rev.delta,
774 self.assertEqual(rev.delta,
790 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' +
775 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' +
791 fulltext0)
776 fulltext0)
792
777
793 rev = next(gen)
778 rev = next(gen)
794 self.assertEqual(rev.node, node2)
779 self.assertEqual(rev.node, node2)
795 self.assertEqual(rev.p1node, node1)
780 self.assertEqual(rev.p1node, node1)
796 self.assertEqual(rev.p2node, nullid)
781 self.assertEqual(rev.p2node, nullid)
797 self.assertEqual(rev.basenode, node0)
782 self.assertEqual(rev.basenode, node0)
798
783
799 with self.assertRaises(StopIteration):
784 with self.assertRaises(StopIteration):
800 next(gen)
785 next(gen)
801
786
802 def testrenamed(self):
787 def testrenamed(self):
803 fulltext0 = b'foo'
788 fulltext0 = b'foo'
804 fulltext1 = b'bar'
789 fulltext1 = b'bar'
805 fulltext2 = b'baz'
790 fulltext2 = b'baz'
806
791
807 meta1 = {
792 meta1 = {
808 b'copy': b'source0',
793 b'copy': b'source0',
809 b'copyrev': b'a' * 40,
794 b'copyrev': b'a' * 40,
810 }
795 }
811
796
812 meta2 = {
797 meta2 = {
813 b'copy': b'source1',
798 b'copy': b'source1',
814 b'copyrev': b'b' * 40,
799 b'copyrev': b'b' * 40,
815 }
800 }
816
801
817 stored1 = b''.join([
802 stored1 = b''.join([
818 b'\x01\ncopy: source0\n',
803 b'\x01\ncopy: source0\n',
819 b'copyrev: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n\x01\n',
804 b'copyrev: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n\x01\n',
820 fulltext1,
805 fulltext1,
821 ])
806 ])
822
807
823 stored2 = b''.join([
808 stored2 = b''.join([
824 b'\x01\ncopy: source1\n',
809 b'\x01\ncopy: source1\n',
825 b'copyrev: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n\x01\n',
810 b'copyrev: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n\x01\n',
826 fulltext2,
811 fulltext2,
827 ])
812 ])
828
813
829 f = self._makefilefn()
814 f = self._makefilefn()
830 with self._maketransactionfn() as tr:
815 with self._maketransactionfn() as tr:
831 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
816 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
832 node1 = f.add(fulltext1, meta1, tr, 1, node0, nullid)
817 node1 = f.add(fulltext1, meta1, tr, 1, node0, nullid)
833 node2 = f.add(fulltext2, meta2, tr, 2, nullid, nullid)
818 node2 = f.add(fulltext2, meta2, tr, 2, nullid, nullid)
834
819
835 # Metadata header isn't recognized when parent isn't nullid.
820 # Metadata header isn't recognized when parent isn't nullid.
836 self.assertEqual(f.size(1), len(stored1))
821 self.assertEqual(f.size(1), len(stored1))
837 self.assertEqual(f.size(2), len(fulltext2))
822 self.assertEqual(f.size(2), len(fulltext2))
838
823
839 self.assertEqual(f.revision(node1), stored1)
824 self.assertEqual(f.revision(node1), stored1)
840 self.assertEqual(f.revision(node1, raw=True), stored1)
825 self.assertEqual(f.revision(node1, raw=True), stored1)
841 self.assertEqual(f.revision(node2), stored2)
826 self.assertEqual(f.revision(node2), stored2)
842 self.assertEqual(f.revision(node2, raw=True), stored2)
827 self.assertEqual(f.revision(node2, raw=True), stored2)
843
828
844 self.assertEqual(f.read(node1), fulltext1)
829 self.assertEqual(f.read(node1), fulltext1)
845 self.assertEqual(f.read(node2), fulltext2)
830 self.assertEqual(f.read(node2), fulltext2)
846
831
847 # Returns False when first parent is set.
832 # Returns False when first parent is set.
848 self.assertFalse(f.renamed(node1))
833 self.assertFalse(f.renamed(node1))
849 self.assertEqual(f.renamed(node2), (b'source1', b'\xbb' * 20))
834 self.assertEqual(f.renamed(node2), (b'source1', b'\xbb' * 20))
850
835
851 self.assertTrue(f.cmp(node1, fulltext1))
836 self.assertTrue(f.cmp(node1, fulltext1))
852 self.assertTrue(f.cmp(node1, stored1))
837 self.assertTrue(f.cmp(node1, stored1))
853 self.assertFalse(f.cmp(node2, fulltext2))
838 self.assertFalse(f.cmp(node2, fulltext2))
854 self.assertTrue(f.cmp(node2, stored2))
839 self.assertTrue(f.cmp(node2, stored2))
855
840
856 def testmetadataprefix(self):
841 def testmetadataprefix(self):
857 # Content with metadata prefix has extra prefix inserted in storage.
842 # Content with metadata prefix has extra prefix inserted in storage.
858 fulltext0 = b'\x01\nfoo'
843 fulltext0 = b'\x01\nfoo'
859 stored0 = b'\x01\n\x01\n\x01\nfoo'
844 stored0 = b'\x01\n\x01\n\x01\nfoo'
860
845
861 fulltext1 = b'\x01\nbar'
846 fulltext1 = b'\x01\nbar'
862 meta1 = {
847 meta1 = {
863 b'copy': b'source0',
848 b'copy': b'source0',
864 b'copyrev': b'b' * 40,
849 b'copyrev': b'b' * 40,
865 }
850 }
866 stored1 = b''.join([
851 stored1 = b''.join([
867 b'\x01\ncopy: source0\n',
852 b'\x01\ncopy: source0\n',
868 b'copyrev: %s\n' % (b'b' * 40),
853 b'copyrev: %s\n' % (b'b' * 40),
869 b'\x01\n\x01\nbar',
854 b'\x01\n\x01\nbar',
870 ])
855 ])
871
856
872 f = self._makefilefn()
857 f = self._makefilefn()
873 with self._maketransactionfn() as tr:
858 with self._maketransactionfn() as tr:
874 node0 = f.add(fulltext0, {}, tr, 0, nullid, nullid)
859 node0 = f.add(fulltext0, {}, tr, 0, nullid, nullid)
875 node1 = f.add(fulltext1, meta1, tr, 1, nullid, nullid)
860 node1 = f.add(fulltext1, meta1, tr, 1, nullid, nullid)
876
861
877 # TODO this is buggy.
862 # TODO this is buggy.
878 self.assertEqual(f.size(0), len(fulltext0) + 4)
863 self.assertEqual(f.size(0), len(fulltext0) + 4)
879
864
880 self.assertEqual(f.size(1), len(fulltext1))
865 self.assertEqual(f.size(1), len(fulltext1))
881
866
882 self.assertEqual(f.revision(node0), stored0)
867 self.assertEqual(f.revision(node0), stored0)
883 self.assertEqual(f.revision(node0, raw=True), stored0)
868 self.assertEqual(f.revision(node0, raw=True), stored0)
884
869
885 self.assertEqual(f.revision(node1), stored1)
870 self.assertEqual(f.revision(node1), stored1)
886 self.assertEqual(f.revision(node1, raw=True), stored1)
871 self.assertEqual(f.revision(node1, raw=True), stored1)
887
872
888 self.assertEqual(f.read(node0), fulltext0)
873 self.assertEqual(f.read(node0), fulltext0)
889 self.assertEqual(f.read(node1), fulltext1)
874 self.assertEqual(f.read(node1), fulltext1)
890
875
891 self.assertFalse(f.cmp(node0, fulltext0))
876 self.assertFalse(f.cmp(node0, fulltext0))
892 self.assertTrue(f.cmp(node0, stored0))
877 self.assertTrue(f.cmp(node0, stored0))
893
878
894 self.assertFalse(f.cmp(node1, fulltext1))
879 self.assertFalse(f.cmp(node1, fulltext1))
895 self.assertTrue(f.cmp(node1, stored0))
880 self.assertTrue(f.cmp(node1, stored0))
896
881
897 def testcensored(self):
882 def testcensored(self):
898 f = self._makefilefn()
883 f = self._makefilefn()
899
884
900 stored1 = revlog.packmeta({
885 stored1 = revlog.packmeta({
901 b'censored': b'tombstone',
886 b'censored': b'tombstone',
902 }, b'')
887 }, b'')
903
888
904 # TODO tests are incomplete because we need the node to be
889 # TODO tests are incomplete because we need the node to be
905 # different due to presence of censor metadata. But we can't
890 # different due to presence of censor metadata. But we can't
906 # do this with addrevision().
891 # do this with addrevision().
907 with self._maketransactionfn() as tr:
892 with self._maketransactionfn() as tr:
908 node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
893 node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
909 f.addrevision(stored1, tr, 1, node0, nullid,
894 f.addrevision(stored1, tr, 1, node0, nullid,
910 flags=revlog.REVIDX_ISCENSORED)
895 flags=revlog.REVIDX_ISCENSORED)
911
896
912 self.assertTrue(f.iscensored(1))
897 self.assertTrue(f.iscensored(1))
913
898
914 self.assertEqual(f.revision(1), stored1)
899 self.assertEqual(f.revision(1), stored1)
915 self.assertEqual(f.revision(1, raw=True), stored1)
900 self.assertEqual(f.revision(1, raw=True), stored1)
916
901
917 self.assertEqual(f.read(1), b'')
902 self.assertEqual(f.read(1), b'')
918
903
919 class ifilemutationtests(basetestcase):
904 class ifilemutationtests(basetestcase):
920 """Generic tests for the ifilemutation interface.
905 """Generic tests for the ifilemutation interface.
921
906
922 All file storage backends that support writing should conform to this
907 All file storage backends that support writing should conform to this
923 interface.
908 interface.
924
909
925 Use ``makeifilemutationtests()`` to create an instance of this type.
910 Use ``makeifilemutationtests()`` to create an instance of this type.
926 """
911 """
927 def testaddnoop(self):
912 def testaddnoop(self):
928 f = self._makefilefn()
913 f = self._makefilefn()
929 with self._maketransactionfn() as tr:
914 with self._maketransactionfn() as tr:
930 node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
915 node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
931 node1 = f.add(b'foo', None, tr, 0, nullid, nullid)
916 node1 = f.add(b'foo', None, tr, 0, nullid, nullid)
932 # Varying by linkrev shouldn't impact hash.
917 # Varying by linkrev shouldn't impact hash.
933 node2 = f.add(b'foo', None, tr, 1, nullid, nullid)
918 node2 = f.add(b'foo', None, tr, 1, nullid, nullid)
934
919
935 self.assertEqual(node1, node0)
920 self.assertEqual(node1, node0)
936 self.assertEqual(node2, node0)
921 self.assertEqual(node2, node0)
937 self.assertEqual(len(f), 1)
922 self.assertEqual(len(f), 1)
938
923
939 def testaddrevisionbadnode(self):
924 def testaddrevisionbadnode(self):
940 f = self._makefilefn()
925 f = self._makefilefn()
941 with self._maketransactionfn() as tr:
926 with self._maketransactionfn() as tr:
942 # Adding a revision with bad node value fails.
927 # Adding a revision with bad node value fails.
943 with self.assertRaises(error.StorageError):
928 with self.assertRaises(error.StorageError):
944 f.addrevision(b'foo', tr, 0, nullid, nullid, node=b'\x01' * 20)
929 f.addrevision(b'foo', tr, 0, nullid, nullid, node=b'\x01' * 20)
945
930
946 def testaddrevisionunknownflag(self):
931 def testaddrevisionunknownflag(self):
947 f = self._makefilefn()
932 f = self._makefilefn()
948 with self._maketransactionfn() as tr:
933 with self._maketransactionfn() as tr:
949 for i in range(15, 0, -1):
934 for i in range(15, 0, -1):
950 if (1 << i) & ~revlog.REVIDX_KNOWN_FLAGS:
935 if (1 << i) & ~revlog.REVIDX_KNOWN_FLAGS:
951 flags = 1 << i
936 flags = 1 << i
952 break
937 break
953
938
954 with self.assertRaises(error.StorageError):
939 with self.assertRaises(error.StorageError):
955 f.addrevision(b'foo', tr, 0, nullid, nullid, flags=flags)
940 f.addrevision(b'foo', tr, 0, nullid, nullid, flags=flags)
956
941
957 def testaddgroupsimple(self):
942 def testaddgroupsimple(self):
958 f = self._makefilefn()
943 f = self._makefilefn()
959
944
960 callbackargs = []
945 callbackargs = []
961 def cb(*args, **kwargs):
946 def cb(*args, **kwargs):
962 callbackargs.append((args, kwargs))
947 callbackargs.append((args, kwargs))
963
948
964 def linkmapper(node):
949 def linkmapper(node):
965 return 0
950 return 0
966
951
967 with self._maketransactionfn() as tr:
952 with self._maketransactionfn() as tr:
968 nodes = f.addgroup([], None, tr, addrevisioncb=cb)
953 nodes = f.addgroup([], None, tr, addrevisioncb=cb)
969
954
970 self.assertEqual(nodes, [])
955 self.assertEqual(nodes, [])
971 self.assertEqual(callbackargs, [])
956 self.assertEqual(callbackargs, [])
972 self.assertEqual(len(f), 0)
957 self.assertEqual(len(f), 0)
973
958
974 fulltext0 = b'foo'
959 fulltext0 = b'foo'
975 delta0 = mdiff.trivialdiffheader(len(fulltext0)) + fulltext0
960 delta0 = mdiff.trivialdiffheader(len(fulltext0)) + fulltext0
976
961
977 deltas = [
962 deltas = [
978 (b'\x01' * 20, nullid, nullid, nullid, nullid, delta0, 0),
963 (b'\x01' * 20, nullid, nullid, nullid, nullid, delta0, 0),
979 ]
964 ]
980
965
981 with self._maketransactionfn() as tr:
966 with self._maketransactionfn() as tr:
982 with self.assertRaises(error.StorageError):
967 with self.assertRaises(error.StorageError):
983 f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)
968 f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)
984
969
985 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
970 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
986
971
987 f = self._makefilefn()
972 f = self._makefilefn()
988
973
989 deltas = [
974 deltas = [
990 (node0, nullid, nullid, nullid, nullid, delta0, 0),
975 (node0, nullid, nullid, nullid, nullid, delta0, 0),
991 ]
976 ]
992
977
993 with self._maketransactionfn() as tr:
978 with self._maketransactionfn() as tr:
994 nodes = f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)
979 nodes = f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)
995
980
996 self.assertEqual(nodes, [
981 self.assertEqual(nodes, [
997 b'\x49\xd8\xcb\xb1\x5c\xe2\x57\x92\x04\x47'
982 b'\x49\xd8\xcb\xb1\x5c\xe2\x57\x92\x04\x47'
998 b'\x00\x6b\x46\x97\x8b\x7a\xf9\x80\xa9\x79'])
983 b'\x00\x6b\x46\x97\x8b\x7a\xf9\x80\xa9\x79'])
999
984
1000 self.assertEqual(len(callbackargs), 1)
985 self.assertEqual(len(callbackargs), 1)
1001 self.assertEqual(callbackargs[0][0][1], nodes[0])
986 self.assertEqual(callbackargs[0][0][1], nodes[0])
1002
987
1003 self.assertEqual(list(f.revs()), [0])
988 self.assertEqual(list(f.revs()), [0])
1004 self.assertEqual(f.rev(nodes[0]), 0)
989 self.assertEqual(f.rev(nodes[0]), 0)
1005 self.assertEqual(f.node(0), nodes[0])
990 self.assertEqual(f.node(0), nodes[0])
1006
991
1007 def testaddgroupmultiple(self):
992 def testaddgroupmultiple(self):
1008 f = self._makefilefn()
993 f = self._makefilefn()
1009
994
1010 fulltexts = [
995 fulltexts = [
1011 b'foo',
996 b'foo',
1012 b'bar',
997 b'bar',
1013 b'x' * 1024,
998 b'x' * 1024,
1014 ]
999 ]
1015
1000
1016 nodes = []
1001 nodes = []
1017 with self._maketransactionfn() as tr:
1002 with self._maketransactionfn() as tr:
1018 for fulltext in fulltexts:
1003 for fulltext in fulltexts:
1019 nodes.append(f.add(fulltext, None, tr, 0, nullid, nullid))
1004 nodes.append(f.add(fulltext, None, tr, 0, nullid, nullid))
1020
1005
1021 f = self._makefilefn()
1006 f = self._makefilefn()
1022 deltas = []
1007 deltas = []
1023 for i, fulltext in enumerate(fulltexts):
1008 for i, fulltext in enumerate(fulltexts):
1024 delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext
1009 delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext
1025
1010
1026 deltas.append((nodes[i], nullid, nullid, nullid, nullid, delta, 0))
1011 deltas.append((nodes[i], nullid, nullid, nullid, nullid, delta, 0))
1027
1012
1028 with self._maketransactionfn() as tr:
1013 with self._maketransactionfn() as tr:
1029 self.assertEqual(f.addgroup(deltas, lambda x: 0, tr), nodes)
1014 self.assertEqual(f.addgroup(deltas, lambda x: 0, tr), nodes)
1030
1015
1031 self.assertEqual(len(f), len(deltas))
1016 self.assertEqual(len(f), len(deltas))
1032 self.assertEqual(list(f.revs()), [0, 1, 2])
1017 self.assertEqual(list(f.revs()), [0, 1, 2])
1033 self.assertEqual(f.rev(nodes[0]), 0)
1018 self.assertEqual(f.rev(nodes[0]), 0)
1034 self.assertEqual(f.rev(nodes[1]), 1)
1019 self.assertEqual(f.rev(nodes[1]), 1)
1035 self.assertEqual(f.rev(nodes[2]), 2)
1020 self.assertEqual(f.rev(nodes[2]), 2)
1036 self.assertEqual(f.node(0), nodes[0])
1021 self.assertEqual(f.node(0), nodes[0])
1037 self.assertEqual(f.node(1), nodes[1])
1022 self.assertEqual(f.node(1), nodes[1])
1038 self.assertEqual(f.node(2), nodes[2])
1023 self.assertEqual(f.node(2), nodes[2])
1039
1024
1040 def makeifileindextests(makefilefn, maketransactionfn):
1025 def makeifileindextests(makefilefn, maketransactionfn):
1041 """Create a unittest.TestCase class suitable for testing file storage.
1026 """Create a unittest.TestCase class suitable for testing file storage.
1042
1027
1043 ``makefilefn`` is a callable which receives the test case as an
1028 ``makefilefn`` is a callable which receives the test case as an
1044 argument and returns an object implementing the ``ifilestorage`` interface.
1029 argument and returns an object implementing the ``ifilestorage`` interface.
1045
1030
1046 ``maketransactionfn`` is a callable which receives the test case as an
1031 ``maketransactionfn`` is a callable which receives the test case as an
1047 argument and returns a transaction object.
1032 argument and returns a transaction object.
1048
1033
1049 Returns a type that is a ``unittest.TestCase`` that can be used for
1034 Returns a type that is a ``unittest.TestCase`` that can be used for
1050 testing the object implementing the file storage interface. Simply
1035 testing the object implementing the file storage interface. Simply
1051 assign the returned value to a module-level attribute and a test loader
1036 assign the returned value to a module-level attribute and a test loader
1052 should find and run it automatically.
1037 should find and run it automatically.
1053 """
1038 """
1054 d = {
1039 d = {
1055 r'_makefilefn': makefilefn,
1040 r'_makefilefn': makefilefn,
1056 r'_maketransactionfn': maketransactionfn,
1041 r'_maketransactionfn': maketransactionfn,
1057 }
1042 }
1058 return type(r'ifileindextests', (ifileindextests,), d)
1043 return type(r'ifileindextests', (ifileindextests,), d)
1059
1044
1060 def makeifiledatatests(makefilefn, maketransactionfn):
1045 def makeifiledatatests(makefilefn, maketransactionfn):
1061 d = {
1046 d = {
1062 r'_makefilefn': makefilefn,
1047 r'_makefilefn': makefilefn,
1063 r'_maketransactionfn': maketransactionfn,
1048 r'_maketransactionfn': maketransactionfn,
1064 }
1049 }
1065 return type(r'ifiledatatests', (ifiledatatests,), d)
1050 return type(r'ifiledatatests', (ifiledatatests,), d)
1066
1051
1067 def makeifilemutationtests(makefilefn, maketransactionfn):
1052 def makeifilemutationtests(makefilefn, maketransactionfn):
1068 d = {
1053 d = {
1069 r'_makefilefn': makefilefn,
1054 r'_makefilefn': makefilefn,
1070 r'_maketransactionfn': maketransactionfn,
1055 r'_maketransactionfn': maketransactionfn,
1071 }
1056 }
1072 return type(r'ifilemutationtests', (ifilemutationtests,), d)
1057 return type(r'ifilemutationtests', (ifilemutationtests,), d)
@@ -1,679 +1,673 b''
1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 # To use this with the test suite:
8 # To use this with the test suite:
9 #
9 #
10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
12
12
13 from __future__ import absolute_import
13 from __future__ import absolute_import
14
14
15 import stat
15 import stat
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18 from mercurial.node import (
18 from mercurial.node import (
19 bin,
19 bin,
20 hex,
20 hex,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 )
23 )
24 from mercurial.thirdparty import (
24 from mercurial.thirdparty import (
25 attr,
25 attr,
26 cbor,
26 cbor,
27 )
27 )
28 from mercurial import (
28 from mercurial import (
29 ancestor,
29 ancestor,
30 bundlerepo,
30 bundlerepo,
31 error,
31 error,
32 extensions,
32 extensions,
33 localrepo,
33 localrepo,
34 mdiff,
34 mdiff,
35 pycompat,
35 pycompat,
36 repository,
36 repository,
37 revlog,
37 revlog,
38 store,
38 store,
39 verify,
39 verify,
40 )
40 )
41 from mercurial.utils import (
41 from mercurial.utils import (
42 interfaceutil,
42 interfaceutil,
43 )
43 )
44
44
45 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
45 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
46 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
46 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
47 # be specifying the version(s) of Mercurial they are tested with, or
47 # be specifying the version(s) of Mercurial they are tested with, or
48 # leave the attribute unspecified.
48 # leave the attribute unspecified.
49 testedwith = 'ships-with-hg-core'
49 testedwith = 'ships-with-hg-core'
50
50
51 REQUIREMENT = 'testonly-simplestore'
51 REQUIREMENT = 'testonly-simplestore'
52
52
53 def validatenode(node):
53 def validatenode(node):
54 if isinstance(node, int):
54 if isinstance(node, int):
55 raise ValueError('expected node; got int')
55 raise ValueError('expected node; got int')
56
56
57 if len(node) != 20:
57 if len(node) != 20:
58 raise ValueError('expected 20 byte node')
58 raise ValueError('expected 20 byte node')
59
59
60 def validaterev(rev):
60 def validaterev(rev):
61 if not isinstance(rev, int):
61 if not isinstance(rev, int):
62 raise ValueError('expected int')
62 raise ValueError('expected int')
63
63
64 class simplestoreerror(error.StorageError):
64 class simplestoreerror(error.StorageError):
65 pass
65 pass
66
66
67 @interfaceutil.implementer(repository.irevisiondelta)
67 @interfaceutil.implementer(repository.irevisiondelta)
68 @attr.s(slots=True, frozen=True)
68 @attr.s(slots=True, frozen=True)
69 class simplestorerevisiondelta(object):
69 class simplestorerevisiondelta(object):
70 node = attr.ib()
70 node = attr.ib()
71 p1node = attr.ib()
71 p1node = attr.ib()
72 p2node = attr.ib()
72 p2node = attr.ib()
73 basenode = attr.ib()
73 basenode = attr.ib()
74 linknode = attr.ib()
74 linknode = attr.ib()
75 flags = attr.ib()
75 flags = attr.ib()
76 baserevisionsize = attr.ib()
76 baserevisionsize = attr.ib()
77 revision = attr.ib()
77 revision = attr.ib()
78 delta = attr.ib()
78 delta = attr.ib()
79
79
80 @interfaceutil.implementer(repository.ifilestorage)
80 @interfaceutil.implementer(repository.ifilestorage)
81 class filestorage(object):
81 class filestorage(object):
82 """Implements storage for a tracked path.
82 """Implements storage for a tracked path.
83
83
84 Data is stored in the VFS in a directory corresponding to the tracked
84 Data is stored in the VFS in a directory corresponding to the tracked
85 path.
85 path.
86
86
87 Index data is stored in an ``index`` file using CBOR.
87 Index data is stored in an ``index`` file using CBOR.
88
88
89 Fulltext data is stored in files having names of the node.
89 Fulltext data is stored in files having names of the node.
90 """
90 """
91
91
92 def __init__(self, svfs, path):
92 def __init__(self, svfs, path):
93 self._svfs = svfs
93 self._svfs = svfs
94 self._path = path
94 self._path = path
95
95
96 self._storepath = b'/'.join([b'data', path])
96 self._storepath = b'/'.join([b'data', path])
97 self._indexpath = b'/'.join([self._storepath, b'index'])
97 self._indexpath = b'/'.join([self._storepath, b'index'])
98
98
99 indexdata = self._svfs.tryread(self._indexpath)
99 indexdata = self._svfs.tryread(self._indexpath)
100 if indexdata:
100 if indexdata:
101 indexdata = cbor.loads(indexdata)
101 indexdata = cbor.loads(indexdata)
102
102
103 self._indexdata = indexdata or []
103 self._indexdata = indexdata or []
104 self._indexbynode = {}
104 self._indexbynode = {}
105 self._indexbyrev = {}
105 self._indexbyrev = {}
106 self._index = []
106 self._index = []
107 self._refreshindex()
107 self._refreshindex()
108
108
109 def _refreshindex(self):
109 def _refreshindex(self):
110 self._indexbynode.clear()
110 self._indexbynode.clear()
111 self._indexbyrev.clear()
111 self._indexbyrev.clear()
112 self._index = []
112 self._index = []
113
113
114 for i, entry in enumerate(self._indexdata):
114 for i, entry in enumerate(self._indexdata):
115 self._indexbynode[entry[b'node']] = entry
115 self._indexbynode[entry[b'node']] = entry
116 self._indexbyrev[i] = entry
116 self._indexbyrev[i] = entry
117
117
118 self._indexbynode[nullid] = {
118 self._indexbynode[nullid] = {
119 b'node': nullid,
119 b'node': nullid,
120 b'p1': nullid,
120 b'p1': nullid,
121 b'p2': nullid,
121 b'p2': nullid,
122 b'linkrev': nullrev,
122 b'linkrev': nullrev,
123 b'flags': 0,
123 b'flags': 0,
124 }
124 }
125
125
126 self._indexbyrev[nullrev] = {
126 self._indexbyrev[nullrev] = {
127 b'node': nullid,
127 b'node': nullid,
128 b'p1': nullid,
128 b'p1': nullid,
129 b'p2': nullid,
129 b'p2': nullid,
130 b'linkrev': nullrev,
130 b'linkrev': nullrev,
131 b'flags': 0,
131 b'flags': 0,
132 }
132 }
133
133
134 for i, entry in enumerate(self._indexdata):
134 for i, entry in enumerate(self._indexdata):
135 p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))
135 p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))
136
136
137 # start, length, rawsize, chainbase, linkrev, p1, p2, node
137 # start, length, rawsize, chainbase, linkrev, p1, p2, node
138 self._index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
138 self._index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
139 entry[b'node']))
139 entry[b'node']))
140
140
141 self._index.append((0, 0, 0, -1, -1, -1, -1, nullid))
141 self._index.append((0, 0, 0, -1, -1, -1, -1, nullid))
142
142
143 def __len__(self):
143 def __len__(self):
144 return len(self._indexdata)
144 return len(self._indexdata)
145
145
146 def __iter__(self):
146 def __iter__(self):
147 return iter(range(len(self)))
147 return iter(range(len(self)))
148
148
149 def revs(self, start=0, stop=None):
149 def revs(self, start=0, stop=None):
150 step = 1
150 step = 1
151 if stop is not None:
151 if stop is not None:
152 if start > stop:
152 if start > stop:
153 step = -1
153 step = -1
154
154
155 stop += step
155 stop += step
156 else:
156 else:
157 stop = len(self)
157 stop = len(self)
158
158
159 return range(start, stop, step)
159 return range(start, stop, step)
160
160
161 def parents(self, node):
161 def parents(self, node):
162 validatenode(node)
162 validatenode(node)
163
163
164 if node not in self._indexbynode:
164 if node not in self._indexbynode:
165 raise KeyError('unknown node')
165 raise KeyError('unknown node')
166
166
167 entry = self._indexbynode[node]
167 entry = self._indexbynode[node]
168
168
169 return entry[b'p1'], entry[b'p2']
169 return entry[b'p1'], entry[b'p2']
170
170
171 def parentrevs(self, rev):
171 def parentrevs(self, rev):
172 p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
172 p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
173 return self.rev(p1), self.rev(p2)
173 return self.rev(p1), self.rev(p2)
174
174
175 def rev(self, node):
175 def rev(self, node):
176 validatenode(node)
176 validatenode(node)
177
177
178 try:
178 try:
179 self._indexbynode[node]
179 self._indexbynode[node]
180 except KeyError:
180 except KeyError:
181 raise error.LookupError(node, self._indexpath, _('no node'))
181 raise error.LookupError(node, self._indexpath, _('no node'))
182
182
183 for rev, entry in self._indexbyrev.items():
183 for rev, entry in self._indexbyrev.items():
184 if entry[b'node'] == node:
184 if entry[b'node'] == node:
185 return rev
185 return rev
186
186
187 raise error.ProgrammingError('this should not occur')
187 raise error.ProgrammingError('this should not occur')
188
188
189 def node(self, rev):
189 def node(self, rev):
190 validaterev(rev)
190 validaterev(rev)
191
191
192 return self._indexbyrev[rev][b'node']
192 return self._indexbyrev[rev][b'node']
193
193
194 def lookup(self, node):
194 def lookup(self, node):
195 if isinstance(node, int):
195 if isinstance(node, int):
196 return self.node(node)
196 return self.node(node)
197
197
198 if len(node) == 20:
198 if len(node) == 20:
199 self.rev(node)
199 self.rev(node)
200 return node
200 return node
201
201
202 try:
202 try:
203 rev = int(node)
203 rev = int(node)
204 if '%d' % rev != node:
204 if '%d' % rev != node:
205 raise ValueError
205 raise ValueError
206
206
207 if rev < 0:
207 if rev < 0:
208 rev = len(self) + rev
208 rev = len(self) + rev
209 if rev < 0 or rev >= len(self):
209 if rev < 0 or rev >= len(self):
210 raise ValueError
210 raise ValueError
211
211
212 return self.node(rev)
212 return self.node(rev)
213 except (ValueError, OverflowError):
213 except (ValueError, OverflowError):
214 pass
214 pass
215
215
216 if len(node) == 40:
216 if len(node) == 40:
217 try:
217 try:
218 rawnode = bin(node)
218 rawnode = bin(node)
219 self.rev(rawnode)
219 self.rev(rawnode)
220 return rawnode
220 return rawnode
221 except TypeError:
221 except TypeError:
222 pass
222 pass
223
223
224 raise error.LookupError(node, self._path, _('invalid lookup input'))
224 raise error.LookupError(node, self._path, _('invalid lookup input'))
225
225
226 def linkrev(self, rev):
226 def linkrev(self, rev):
227 validaterev(rev)
227 validaterev(rev)
228
228
229 return self._indexbyrev[rev][b'linkrev']
229 return self._indexbyrev[rev][b'linkrev']
230
230
231 def _flags(self, rev):
231 def _flags(self, rev):
232 validaterev(rev)
232 validaterev(rev)
233
233
234 return self._indexbyrev[rev][b'flags']
234 return self._indexbyrev[rev][b'flags']
235
235
236 def deltaparent(self, rev):
237 validaterev(rev)
238
239 p1node = self.parents(self.node(rev))[0]
240 return self.rev(p1node)
241
242 def _candelta(self, baserev, rev):
236 def _candelta(self, baserev, rev):
243 validaterev(baserev)
237 validaterev(baserev)
244 validaterev(rev)
238 validaterev(rev)
245
239
246 if ((self._flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
240 if ((self._flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
247 or (self._flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
241 or (self._flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
248 return False
242 return False
249
243
250 return True
244 return True
251
245
252 def _processflags(self, text, flags, operation, raw=False):
246 def _processflags(self, text, flags, operation, raw=False):
253 if flags == 0:
247 if flags == 0:
254 return text, True
248 return text, True
255
249
256 if flags & ~revlog.REVIDX_KNOWN_FLAGS:
250 if flags & ~revlog.REVIDX_KNOWN_FLAGS:
257 raise simplestoreerror(_("incompatible revision flag '%#x'") %
251 raise simplestoreerror(_("incompatible revision flag '%#x'") %
258 (flags & ~revlog.REVIDX_KNOWN_FLAGS))
252 (flags & ~revlog.REVIDX_KNOWN_FLAGS))
259
253
260 validatehash = True
254 validatehash = True
261 # Depending on the operation (read or write), the order might be
255 # Depending on the operation (read or write), the order might be
262 # reversed due to non-commutative transforms.
256 # reversed due to non-commutative transforms.
263 orderedflags = revlog.REVIDX_FLAGS_ORDER
257 orderedflags = revlog.REVIDX_FLAGS_ORDER
264 if operation == 'write':
258 if operation == 'write':
265 orderedflags = reversed(orderedflags)
259 orderedflags = reversed(orderedflags)
266
260
267 for flag in orderedflags:
261 for flag in orderedflags:
268 # If a flagprocessor has been registered for a known flag, apply the
262 # If a flagprocessor has been registered for a known flag, apply the
269 # related operation transform and update result tuple.
263 # related operation transform and update result tuple.
270 if flag & flags:
264 if flag & flags:
271 vhash = True
265 vhash = True
272
266
273 if flag not in revlog._flagprocessors:
267 if flag not in revlog._flagprocessors:
274 message = _("missing processor for flag '%#x'") % (flag)
268 message = _("missing processor for flag '%#x'") % (flag)
275 raise simplestoreerror(message)
269 raise simplestoreerror(message)
276
270
277 processor = revlog._flagprocessors[flag]
271 processor = revlog._flagprocessors[flag]
278 if processor is not None:
272 if processor is not None:
279 readtransform, writetransform, rawtransform = processor
273 readtransform, writetransform, rawtransform = processor
280
274
281 if raw:
275 if raw:
282 vhash = rawtransform(self, text)
276 vhash = rawtransform(self, text)
283 elif operation == 'read':
277 elif operation == 'read':
284 text, vhash = readtransform(self, text)
278 text, vhash = readtransform(self, text)
285 else: # write operation
279 else: # write operation
286 text, vhash = writetransform(self, text)
280 text, vhash = writetransform(self, text)
287 validatehash = validatehash and vhash
281 validatehash = validatehash and vhash
288
282
289 return text, validatehash
283 return text, validatehash
290
284
291 def checkhash(self, text, node, p1=None, p2=None, rev=None):
285 def checkhash(self, text, node, p1=None, p2=None, rev=None):
292 if p1 is None and p2 is None:
286 if p1 is None and p2 is None:
293 p1, p2 = self.parents(node)
287 p1, p2 = self.parents(node)
294 if node != revlog.hash(text, p1, p2):
288 if node != revlog.hash(text, p1, p2):
295 raise simplestoreerror(_("integrity check failed on %s") %
289 raise simplestoreerror(_("integrity check failed on %s") %
296 self._path)
290 self._path)
297
291
298 def revision(self, node, raw=False):
292 def revision(self, node, raw=False):
299 validatenode(node)
293 validatenode(node)
300
294
301 if node == nullid:
295 if node == nullid:
302 return b''
296 return b''
303
297
304 rev = self.rev(node)
298 rev = self.rev(node)
305 flags = self._flags(rev)
299 flags = self._flags(rev)
306
300
307 path = b'/'.join([self._storepath, hex(node)])
301 path = b'/'.join([self._storepath, hex(node)])
308 rawtext = self._svfs.read(path)
302 rawtext = self._svfs.read(path)
309
303
310 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
304 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
311 if validatehash:
305 if validatehash:
312 self.checkhash(text, node, rev=rev)
306 self.checkhash(text, node, rev=rev)
313
307
314 return text
308 return text
315
309
316 def read(self, node):
310 def read(self, node):
317 validatenode(node)
311 validatenode(node)
318
312
319 revision = self.revision(node)
313 revision = self.revision(node)
320
314
321 if not revision.startswith(b'\1\n'):
315 if not revision.startswith(b'\1\n'):
322 return revision
316 return revision
323
317
324 start = revision.index(b'\1\n', 2)
318 start = revision.index(b'\1\n', 2)
325 return revision[start + 2:]
319 return revision[start + 2:]
326
320
327 def renamed(self, node):
321 def renamed(self, node):
328 validatenode(node)
322 validatenode(node)
329
323
330 if self.parents(node)[0] != nullid:
324 if self.parents(node)[0] != nullid:
331 return False
325 return False
332
326
333 fulltext = self.revision(node)
327 fulltext = self.revision(node)
334 m = revlog.parsemeta(fulltext)[0]
328 m = revlog.parsemeta(fulltext)[0]
335
329
336 if m and 'copy' in m:
330 if m and 'copy' in m:
337 return m['copy'], bin(m['copyrev'])
331 return m['copy'], bin(m['copyrev'])
338
332
339 return False
333 return False
340
334
341 def cmp(self, node, text):
335 def cmp(self, node, text):
342 validatenode(node)
336 validatenode(node)
343
337
344 t = text
338 t = text
345
339
346 if text.startswith(b'\1\n'):
340 if text.startswith(b'\1\n'):
347 t = b'\1\n\1\n' + text
341 t = b'\1\n\1\n' + text
348
342
349 p1, p2 = self.parents(node)
343 p1, p2 = self.parents(node)
350
344
351 if revlog.hash(t, p1, p2) == node:
345 if revlog.hash(t, p1, p2) == node:
352 return False
346 return False
353
347
354 if self.iscensored(self.rev(node)):
348 if self.iscensored(self.rev(node)):
355 return text != b''
349 return text != b''
356
350
357 if self.renamed(node):
351 if self.renamed(node):
358 t2 = self.read(node)
352 t2 = self.read(node)
359 return t2 != text
353 return t2 != text
360
354
361 return True
355 return True
362
356
363 def size(self, rev):
357 def size(self, rev):
364 validaterev(rev)
358 validaterev(rev)
365
359
366 node = self._indexbyrev[rev][b'node']
360 node = self._indexbyrev[rev][b'node']
367
361
368 if self.renamed(node):
362 if self.renamed(node):
369 return len(self.read(node))
363 return len(self.read(node))
370
364
371 if self.iscensored(rev):
365 if self.iscensored(rev):
372 return 0
366 return 0
373
367
374 return len(self.revision(node))
368 return len(self.revision(node))
375
369
376 def iscensored(self, rev):
370 def iscensored(self, rev):
377 validaterev(rev)
371 validaterev(rev)
378
372
379 return self._flags(rev) & revlog.REVIDX_ISCENSORED
373 return self._flags(rev) & revlog.REVIDX_ISCENSORED
380
374
381 def commonancestorsheads(self, a, b):
375 def commonancestorsheads(self, a, b):
382 validatenode(a)
376 validatenode(a)
383 validatenode(b)
377 validatenode(b)
384
378
385 a = self.rev(a)
379 a = self.rev(a)
386 b = self.rev(b)
380 b = self.rev(b)
387
381
388 ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
382 ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
389 return pycompat.maplist(self.node, ancestors)
383 return pycompat.maplist(self.node, ancestors)
390
384
391 def descendants(self, revs):
385 def descendants(self, revs):
392 # This is a copy of revlog.descendants()
386 # This is a copy of revlog.descendants()
393 first = min(revs)
387 first = min(revs)
394 if first == nullrev:
388 if first == nullrev:
395 for i in self:
389 for i in self:
396 yield i
390 yield i
397 return
391 return
398
392
399 seen = set(revs)
393 seen = set(revs)
400 for i in self.revs(start=first + 1):
394 for i in self.revs(start=first + 1):
401 for x in self.parentrevs(i):
395 for x in self.parentrevs(i):
402 if x != nullrev and x in seen:
396 if x != nullrev and x in seen:
403 seen.add(i)
397 seen.add(i)
404 yield i
398 yield i
405 break
399 break
406
400
407 # Required by verify.
401 # Required by verify.
408 def files(self):
402 def files(self):
409 entries = self._svfs.listdir(self._storepath)
403 entries = self._svfs.listdir(self._storepath)
410
404
411 # Strip out undo.backup.* files created as part of transaction
405 # Strip out undo.backup.* files created as part of transaction
412 # recording.
406 # recording.
413 entries = [f for f in entries if not f.startswith('undo.backup.')]
407 entries = [f for f in entries if not f.startswith('undo.backup.')]
414
408
415 return [b'/'.join((self._storepath, f)) for f in entries]
409 return [b'/'.join((self._storepath, f)) for f in entries]
416
410
417 def add(self, text, meta, transaction, linkrev, p1, p2):
411 def add(self, text, meta, transaction, linkrev, p1, p2):
418 if meta or text.startswith(b'\1\n'):
412 if meta or text.startswith(b'\1\n'):
419 text = revlog.packmeta(meta, text)
413 text = revlog.packmeta(meta, text)
420
414
421 return self.addrevision(text, transaction, linkrev, p1, p2)
415 return self.addrevision(text, transaction, linkrev, p1, p2)
422
416
423 def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
417 def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
424 flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
418 flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
425 validatenode(p1)
419 validatenode(p1)
426 validatenode(p2)
420 validatenode(p2)
427
421
428 if flags:
422 if flags:
429 node = node or revlog.hash(text, p1, p2)
423 node = node or revlog.hash(text, p1, p2)
430
424
431 rawtext, validatehash = self._processflags(text, flags, 'write')
425 rawtext, validatehash = self._processflags(text, flags, 'write')
432
426
433 node = node or revlog.hash(text, p1, p2)
427 node = node or revlog.hash(text, p1, p2)
434
428
435 if node in self._indexbynode:
429 if node in self._indexbynode:
436 return node
430 return node
437
431
438 if validatehash:
432 if validatehash:
439 self.checkhash(rawtext, node, p1=p1, p2=p2)
433 self.checkhash(rawtext, node, p1=p1, p2=p2)
440
434
441 return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
435 return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
442 flags)
436 flags)
443
437
444 def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
438 def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
445 transaction.addbackup(self._indexpath)
439 transaction.addbackup(self._indexpath)
446
440
447 path = b'/'.join([self._storepath, hex(node)])
441 path = b'/'.join([self._storepath, hex(node)])
448
442
449 self._svfs.write(path, rawtext)
443 self._svfs.write(path, rawtext)
450
444
451 self._indexdata.append({
445 self._indexdata.append({
452 b'node': node,
446 b'node': node,
453 b'p1': p1,
447 b'p1': p1,
454 b'p2': p2,
448 b'p2': p2,
455 b'linkrev': link,
449 b'linkrev': link,
456 b'flags': flags,
450 b'flags': flags,
457 })
451 })
458
452
459 self._reflectindexupdate()
453 self._reflectindexupdate()
460
454
461 return node
455 return node
462
456
463 def _reflectindexupdate(self):
457 def _reflectindexupdate(self):
464 self._refreshindex()
458 self._refreshindex()
465 self._svfs.write(self._indexpath, cbor.dumps(self._indexdata))
459 self._svfs.write(self._indexpath, cbor.dumps(self._indexdata))
466
460
467 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
461 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
468 nodes = []
462 nodes = []
469
463
470 transaction.addbackup(self._indexpath)
464 transaction.addbackup(self._indexpath)
471
465
472 for node, p1, p2, linknode, deltabase, delta, flags in deltas:
466 for node, p1, p2, linknode, deltabase, delta, flags in deltas:
473 linkrev = linkmapper(linknode)
467 linkrev = linkmapper(linknode)
474 flags = flags or revlog.REVIDX_DEFAULT_FLAGS
468 flags = flags or revlog.REVIDX_DEFAULT_FLAGS
475
469
476 nodes.append(node)
470 nodes.append(node)
477
471
478 if node in self._indexbynode:
472 if node in self._indexbynode:
479 continue
473 continue
480
474
481 # Need to resolve the fulltext from the delta base.
475 # Need to resolve the fulltext from the delta base.
482 if deltabase == nullid:
476 if deltabase == nullid:
483 text = mdiff.patch(b'', delta)
477 text = mdiff.patch(b'', delta)
484 else:
478 else:
485 text = mdiff.patch(self.revision(deltabase), delta)
479 text = mdiff.patch(self.revision(deltabase), delta)
486
480
487 self._addrawrevision(node, text, transaction, linkrev, p1, p2,
481 self._addrawrevision(node, text, transaction, linkrev, p1, p2,
488 flags)
482 flags)
489
483
490 if addrevisioncb:
484 if addrevisioncb:
491 addrevisioncb(self, node)
485 addrevisioncb(self, node)
492
486
493 return nodes
487 return nodes
494
488
495 def revdiff(self, rev1, rev2):
489 def revdiff(self, rev1, rev2):
496 validaterev(rev1)
490 validaterev(rev1)
497 validaterev(rev2)
491 validaterev(rev2)
498
492
499 node1 = self.node(rev1)
493 node1 = self.node(rev1)
500 node2 = self.node(rev2)
494 node2 = self.node(rev2)
501
495
502 return mdiff.textdiff(self.revision(node1, raw=True),
496 return mdiff.textdiff(self.revision(node1, raw=True),
503 self.revision(node2, raw=True))
497 self.revision(node2, raw=True))
504
498
505 def heads(self, start=None, stop=None):
499 def heads(self, start=None, stop=None):
506 # This is copied from revlog.py.
500 # This is copied from revlog.py.
507 if start is None and stop is None:
501 if start is None and stop is None:
508 if not len(self):
502 if not len(self):
509 return [nullid]
503 return [nullid]
510 return [self.node(r) for r in self.headrevs()]
504 return [self.node(r) for r in self.headrevs()]
511
505
512 if start is None:
506 if start is None:
513 start = nullid
507 start = nullid
514 if stop is None:
508 if stop is None:
515 stop = []
509 stop = []
516 stoprevs = set([self.rev(n) for n in stop])
510 stoprevs = set([self.rev(n) for n in stop])
517 startrev = self.rev(start)
511 startrev = self.rev(start)
518 reachable = {startrev}
512 reachable = {startrev}
519 heads = {startrev}
513 heads = {startrev}
520
514
521 parentrevs = self.parentrevs
515 parentrevs = self.parentrevs
522 for r in self.revs(start=startrev + 1):
516 for r in self.revs(start=startrev + 1):
523 for p in parentrevs(r):
517 for p in parentrevs(r):
524 if p in reachable:
518 if p in reachable:
525 if r not in stoprevs:
519 if r not in stoprevs:
526 reachable.add(r)
520 reachable.add(r)
527 heads.add(r)
521 heads.add(r)
528 if p in heads and p not in stoprevs:
522 if p in heads and p not in stoprevs:
529 heads.remove(p)
523 heads.remove(p)
530
524
531 return [self.node(r) for r in heads]
525 return [self.node(r) for r in heads]
532
526
533 def children(self, node):
527 def children(self, node):
534 validatenode(node)
528 validatenode(node)
535
529
536 # This is a copy of revlog.children().
530 # This is a copy of revlog.children().
537 c = []
531 c = []
538 p = self.rev(node)
532 p = self.rev(node)
539 for r in self.revs(start=p + 1):
533 for r in self.revs(start=p + 1):
540 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
534 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
541 if prevs:
535 if prevs:
542 for pr in prevs:
536 for pr in prevs:
543 if pr == p:
537 if pr == p:
544 c.append(self.node(r))
538 c.append(self.node(r))
545 elif p == nullrev:
539 elif p == nullrev:
546 c.append(self.node(r))
540 c.append(self.node(r))
547 return c
541 return c
548
542
549 def getstrippoint(self, minlink):
543 def getstrippoint(self, minlink):
550
544
551 # This is largely a copy of revlog.getstrippoint().
545 # This is largely a copy of revlog.getstrippoint().
552 brokenrevs = set()
546 brokenrevs = set()
553 strippoint = len(self)
547 strippoint = len(self)
554
548
555 heads = {}
549 heads = {}
556 futurelargelinkrevs = set()
550 futurelargelinkrevs = set()
557 for head in self.heads():
551 for head in self.heads():
558 headlinkrev = self.linkrev(self.rev(head))
552 headlinkrev = self.linkrev(self.rev(head))
559 heads[head] = headlinkrev
553 heads[head] = headlinkrev
560 if headlinkrev >= minlink:
554 if headlinkrev >= minlink:
561 futurelargelinkrevs.add(headlinkrev)
555 futurelargelinkrevs.add(headlinkrev)
562
556
563 # This algorithm involves walking down the rev graph, starting at the
557 # This algorithm involves walking down the rev graph, starting at the
564 # heads. Since the revs are topologically sorted according to linkrev,
558 # heads. Since the revs are topologically sorted according to linkrev,
565 # once all head linkrevs are below the minlink, we know there are
559 # once all head linkrevs are below the minlink, we know there are
566 # no more revs that could have a linkrev greater than minlink.
560 # no more revs that could have a linkrev greater than minlink.
567 # So we can stop walking.
561 # So we can stop walking.
568 while futurelargelinkrevs:
562 while futurelargelinkrevs:
569 strippoint -= 1
563 strippoint -= 1
570 linkrev = heads.pop(strippoint)
564 linkrev = heads.pop(strippoint)
571
565
572 if linkrev < minlink:
566 if linkrev < minlink:
573 brokenrevs.add(strippoint)
567 brokenrevs.add(strippoint)
574 else:
568 else:
575 futurelargelinkrevs.remove(linkrev)
569 futurelargelinkrevs.remove(linkrev)
576
570
577 for p in self.parentrevs(strippoint):
571 for p in self.parentrevs(strippoint):
578 if p != nullrev:
572 if p != nullrev:
579 plinkrev = self.linkrev(p)
573 plinkrev = self.linkrev(p)
580 heads[p] = plinkrev
574 heads[p] = plinkrev
581 if plinkrev >= minlink:
575 if plinkrev >= minlink:
582 futurelargelinkrevs.add(plinkrev)
576 futurelargelinkrevs.add(plinkrev)
583
577
584 return strippoint, brokenrevs
578 return strippoint, brokenrevs
585
579
586 def strip(self, minlink, transaction):
580 def strip(self, minlink, transaction):
587 if not len(self):
581 if not len(self):
588 return
582 return
589
583
590 rev, _ignored = self.getstrippoint(minlink)
584 rev, _ignored = self.getstrippoint(minlink)
591 if rev == len(self):
585 if rev == len(self):
592 return
586 return
593
587
594 # Purge index data starting at the requested revision.
588 # Purge index data starting at the requested revision.
595 self._indexdata[rev:] = []
589 self._indexdata[rev:] = []
596 self._reflectindexupdate()
590 self._reflectindexupdate()
597
591
598 def issimplestorefile(f, kind, st):
592 def issimplestorefile(f, kind, st):
599 if kind != stat.S_IFREG:
593 if kind != stat.S_IFREG:
600 return False
594 return False
601
595
602 if store.isrevlog(f, kind, st):
596 if store.isrevlog(f, kind, st):
603 return False
597 return False
604
598
605 # Ignore transaction undo files.
599 # Ignore transaction undo files.
606 if f.startswith('undo.'):
600 if f.startswith('undo.'):
607 return False
601 return False
608
602
609 # Otherwise assume it belongs to the simple store.
603 # Otherwise assume it belongs to the simple store.
610 return True
604 return True
611
605
612 class simplestore(store.encodedstore):
606 class simplestore(store.encodedstore):
613 def datafiles(self):
607 def datafiles(self):
614 for x in super(simplestore, self).datafiles():
608 for x in super(simplestore, self).datafiles():
615 yield x
609 yield x
616
610
617 # Supplement with non-revlog files.
611 # Supplement with non-revlog files.
618 extrafiles = self._walk('data', True, filefilter=issimplestorefile)
612 extrafiles = self._walk('data', True, filefilter=issimplestorefile)
619
613
620 for unencoded, encoded, size in extrafiles:
614 for unencoded, encoded, size in extrafiles:
621 try:
615 try:
622 unencoded = store.decodefilename(unencoded)
616 unencoded = store.decodefilename(unencoded)
623 except KeyError:
617 except KeyError:
624 unencoded = None
618 unencoded = None
625
619
626 yield unencoded, encoded, size
620 yield unencoded, encoded, size
627
621
628 def reposetup(ui, repo):
622 def reposetup(ui, repo):
629 if not repo.local():
623 if not repo.local():
630 return
624 return
631
625
632 if isinstance(repo, bundlerepo.bundlerepository):
626 if isinstance(repo, bundlerepo.bundlerepository):
633 raise error.Abort(_('cannot use simple store with bundlerepo'))
627 raise error.Abort(_('cannot use simple store with bundlerepo'))
634
628
635 class simplestorerepo(repo.__class__):
629 class simplestorerepo(repo.__class__):
636 def file(self, f):
630 def file(self, f):
637 return filestorage(self.svfs, f)
631 return filestorage(self.svfs, f)
638
632
639 repo.__class__ = simplestorerepo
633 repo.__class__ = simplestorerepo
640
634
def featuresetup(ui, supported):
    """Declare the simple-store requirement as supported by this repo."""
    supported.add(REQUIREMENT)
643
637
def newreporequirements(orig, ui):
    """Modifies default requirements for new repos to use the simple store."""
    requirements = orig(ui)

    # 'fncache' and 'dotencode' only influence construction of the store
    # object, and we supply our own store — so in principle they could be
    # dropped from the requirements.
    # TODO do this once we feel like taking the test hit.
    #if 'fncache' in requirements:
    #    requirements.remove('fncache')
    #if 'dotencode' in requirements:
    #    requirements.remove('dotencode')

    requirements.add(REQUIREMENT)

    return requirements
659
653
def makestore(orig, requirements, path, vfstype):
    """Build a simplestore when our requirement is set; else defer to orig."""
    if REQUIREMENT in requirements:
        return simplestore(path, vfstype)

    return orig(requirements, path, vfstype)
665
659
def verifierinit(orig, self, *args, **kwargs):
    """Run the wrapped verifier constructor, then silence orphan warnings.

    Files in the simple store intentionally do not line up with what the
    store advertises, so warnings about orphan store files are pure noise.
    """
    orig(self, *args, **kwargs)

    self.warnorphanstorefiles = False
672
666
def extsetup(ui):
    """Install the wrappers that activate the simple store extension."""
    localrepo.featuresetupfuncs.add(featuresetup)

    extensions.wrapfunction(localrepo, 'newreporequirements',
                            newreporequirements)
    extensions.wrapfunction(store, 'store', makestore)
    extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
General Comments 0
You need to be logged in to leave comments. Login now