##// END OF EJS Templates
filelog: remove revdiff() (API)
Gregory Szorc — changeset r40033:2f80eaf3, default branch
@@ -1,252 +1,249
1 # filelog.py - file history class for mercurial
1 # filelog.py - file history class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from . import (
10 from . import (
11 error,
11 error,
12 repository,
12 repository,
13 revlog,
13 revlog,
14 )
14 )
15 from .utils import (
15 from .utils import (
16 interfaceutil,
16 interfaceutil,
17 storageutil,
17 storageutil,
18 )
18 )
19
19
@interfaceutil.implementer(repository.ifilestorage)
class filelog(object):
    """Per-file revision storage, backed by a revlog.

    Thin adapter exposing the ``repository.ifilestorage`` interface on
    top of a ``revlog.revlog`` stored at ``data/<path>.i``. Most methods
    simply delegate to the underlying revlog; the exceptions deal with
    copy/rename metadata packed into the revision text.
    """

    def __init__(self, opener, path):
        self._revlog = revlog.revlog(opener,
                                     '/'.join(('data', path + '.i')),
                                     censorable=True)
        # Full name of the user visible file, relative to the repository root.
        # Used by LFS.
        self._revlog.filename = path

    def __len__(self):
        # Number of revisions stored for this file.
        return len(self._revlog)

    def __iter__(self):
        return self._revlog.__iter__()

    def revs(self, start=0, stop=None):
        return self._revlog.revs(start=start, stop=stop)

    def parents(self, node):
        return self._revlog.parents(node)

    def parentrevs(self, rev):
        return self._revlog.parentrevs(rev)

    def rev(self, node):
        return self._revlog.rev(node)

    def node(self, rev):
        return self._revlog.node(rev)

    def lookup(self, node):
        return self._revlog.lookup(node)

    def linkrev(self, rev):
        return self._revlog.linkrev(rev)

    def commonancestorsheads(self, node1, node2):
        return self._revlog.commonancestorsheads(node1, node2)

    # Used by dagop.blockdescendants().
    def descendants(self, revs):
        return self._revlog.descendants(revs)

    def heads(self, start=None, stop=None):
        return self._revlog.heads(start, stop)

    # Used by hgweb, children extension.
    def children(self, node):
        return self._revlog.children(node)

    def iscensored(self, rev):
        return self._revlog.iscensored(rev)

    # Might be unused.
    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        return self._revlog.checkhash(text, node, p1=p1, p2=p2, rev=rev)

    def revision(self, node, _df=None, raw=False):
        return self._revlog.revision(node, _df=_df, raw=raw)

    def emitrevisions(self, nodes, nodesorder=None,
                      revisiondata=False, assumehaveparentrevisions=False,
                      deltaprevious=False):
        return self._revlog.emitrevisions(
            nodes, nodesorder=nodesorder, revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltaprevious=deltaprevious)

    def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
                    node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
                    cachedelta=None):
        return self._revlog.addrevision(revisiondata, transaction, linkrev,
                                        p1, p2, node=node, flags=flags,
                                        cachedelta=cachedelta)

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        return self._revlog.addgroup(deltas, linkmapper, transaction,
                                     addrevisioncb=addrevisioncb)

    def getstrippoint(self, minlink):
        return self._revlog.getstrippoint(minlink)

    def strip(self, minlink, transaction):
        return self._revlog.strip(minlink, transaction)

    def censorrevision(self, tr, node, tombstone=b''):
        # NOTE(review): ``tr`` is accepted for interface compatibility but
        # is not forwarded to the revlog layer here.
        return self._revlog.censorrevision(node, tombstone=tombstone)

    def files(self):
        return self._revlog.files()

    def read(self, node):
        # Revision text with any copy/rename metadata header stripped.
        return storageutil.filtermetadata(self.revision(node))

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        # A non-empty metadata dict, or a text that itself begins with the
        # "\1\n" metadata marker, requires a packed metadata header so the
        # stored form is unambiguous.
        if meta or text.startswith('\1\n'):
            text = storageutil.packmeta(meta, text)
        return self.addrevision(text, transaction, link, p1, p2)

    def renamed(self, node):
        """Return (copysource, copynode) if ``node`` carries copy metadata.

        Returns False otherwise. Only revisions whose first parent is
        nullid are examined for copy metadata.
        """
        if self.parents(node)[0] != revlog.nullid:
            return False
        t = self.revision(node)
        m = storageutil.parsemeta(t)[0]
        # copy and copyrev occur in pairs. In rare cases due to bugs,
        # one can occur without the other.
        if m and "copy" in m and "copyrev" in m:
            return (m["copy"], revlog.bin(m["copyrev"]))
        return False

    def size(self, rev):
        """return the size of a given revision"""

        # for revisions with renames, we have to go the slow way
        node = self.node(rev)
        if self.renamed(node):
            return len(self.read(node))
        if self.iscensored(rev):
            return 0

        # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
        return self._revlog.size(rev)

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """

        t = text
        if text.startswith('\1\n'):
            # add() stores such texts behind an (empty) metadata header;
            # compare against the equivalent stored form.
            t = '\1\n\1\n' + text

        samehashes = not self._revlog.cmp(node, t)
        if samehashes:
            return False

        # censored files compare against the empty file
        if self.iscensored(self.rev(node)):
            return text != ''

        # renaming a file produces a different hash, even if the data
        # remains unchanged. Check if it's the case (slow):
        if self.renamed(node):
            t2 = self.read(node)
            return t2 != text

        return True

    def verifyintegrity(self, state):
        return self._revlog.verifyintegrity(state)

    def storageinfo(self, exclusivefiles=False, sharedfiles=False,
                    revisionscount=False, trackedsize=False,
                    storedsize=False):
        return self._revlog.storageinfo(
            exclusivefiles=exclusivefiles, sharedfiles=sharedfiles,
            revisionscount=revisionscount, trackedsize=trackedsize,
            storedsize=storedsize)

    # TODO these aren't part of the interface and aren't internal methods.
    # Callers should be fixed to not use them.

    # Used by bundlefilelog, unionfilelog.
    @property
    def indexfile(self):
        return self._revlog.indexfile

    @indexfile.setter
    def indexfile(self, value):
        self._revlog.indexfile = value

    # Used by repo upgrade.
    def clone(self, tr, destrevlog, **kwargs):
        if not isinstance(destrevlog, filelog):
            raise error.ProgrammingError('expected filelog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
202
199
class narrowfilelog(filelog):
    """Filelog variant for use with narrow stores.

    Hides rename metadata whose copy source falls outside the narrow
    matcher, then compensates for that lie in size() and cmp() by
    consulting the unmodified base implementation of renamed().
    """

    def __init__(self, opener, path, narrowmatch):
        super(narrowfilelog, self).__init__(opener, path)
        self._narrowmatch = narrowmatch

    def renamed(self, node):
        rename = super(narrowfilelog, self).renamed(node)

        # Renames that come from outside the narrowspec are problematic
        # because we may lack the base text for the rename. This can result
        # in code attempting to walk the ancestry or compute a diff
        # encountering a missing revision. We address this by silently
        # removing rename metadata if the source file is outside the
        # narrow spec.
        #
        # A better solution would be to see if the base revision is
        # available, rather than assuming it isn't.
        #
        # An even better solution would be to teach all consumers of rename
        # metadata that the base revision may not be available.
        #
        # TODO consider better ways of doing this.
        if not rename:
            return rename
        if self._narrowmatch(rename[0]):
            return rename
        return None

    def size(self, rev):
        # Our renamed() may lie, so ask the base implementation for the
        # accurate answer before deciding how to compute the size.
        node = self.node(rev)
        if not super(narrowfilelog, self).renamed(node):
            return super(narrowfilelog, self).size(rev)
        return len(self.read(node))

    def cmp(self, node, text):
        differs = super(narrowfilelog, self).cmp(node, text)

        # Because renamed() may lie, a reported difference can be a false
        # positive. Cross-check against the original renamed() and, if the
        # revision really is a rename, compare actual content.
        if differs and super(narrowfilelog, self).renamed(node):
            return self.read(node) != text

        return differs
@@ -1,1677 +1,1668
1 # repository.py - Interfaces and base classes for repositories and peers.
1 # repository.py - Interfaces and base classes for repositories and peers.
2 #
2 #
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from . import (
11 from . import (
12 error,
12 error,
13 )
13 )
14 from .utils import (
14 from .utils import (
15 interfaceutil,
15 interfaceutil,
16 )
16 )
17
17
# When narrowing is finalized and no longer subject to format changes,
# we should move this to just "narrow" or similar.
NARROW_REQUIREMENT = 'narrowhg-experimental'

# Local repository feature strings.

# Revlogs are being used for file storage.
REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
# The storage part of the repository is shared from an external source.
REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
# LFS supported for backing file storage.
REPO_FEATURE_LFS = b'lfs'
30
30
class ipeerconnection(interfaceutil.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """
    ui = interfaceutil.Attribute("""ui.ui instance""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of
        this value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def peer():
        """Returns an object conforming to this interface.

        Most implementations will ``return self``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """
76
76
class ipeercapabilities(interfaceutil.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """
96
96
class ipeercommands(interfaceutil.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    Methods commonly call wire protocol commands of the same name.
    """

    def branchmap():
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities():
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def clonebundles():
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository.

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating whether the corresponding
        node at that index is known to the peer.
        """

    def listkeys(namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on
        and the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out():
        """Obtain streaming clone data.

        Successful result should be a generator of data chunks.
        """

    def unbundle(bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """
185
185
class ipeerlegacycommands(interfaceutil.Interface):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no
    longer used by modern clients. To facilitate identifying which commands
    are legacy, the interfaces are split.
    """

    def between(pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(nodes):
        """Obtain ancestor changesets of specific nodes back to a branch
        point.

        For each requested node, the peer finds the first ancestor node that
        is a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each
        node.
        """

    def changegroup(nodes, source):
        """Obtain a changegroup with data for descendants of specified
        nodes."""

    def changegroupsubset(bases, heads, source):
        # NOTE(review): contract undocumented upstream; presumably mirrors
        # changegroup() with explicit bases and heads — confirm against
        # implementations before relying on it.
        pass
217
217
class ipeercommandexecutor(interfaceutil.Interface):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command can not coexist with other commands in this
        executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    def sendcommands():
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    def close():
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting
        the context manager.

        This method may call ``sendcommands()`` if there are buffered
        commands.
        """
274
274
275 class ipeerrequests(interfaceutil.Interface):
275 class ipeerrequests(interfaceutil.Interface):
276 """Interface for executing commands on a peer."""
276 """Interface for executing commands on a peer."""
277
277
278 def commandexecutor():
278 def commandexecutor():
279 """A context manager that resolves to an ipeercommandexecutor.
279 """A context manager that resolves to an ipeercommandexecutor.
280
280
281 The object this resolves to can be used to issue command requests
281 The object this resolves to can be used to issue command requests
282 to the peer.
282 to the peer.
283
283
284 Callers should call its ``callcommand`` method to issue command
284 Callers should call its ``callcommand`` method to issue command
285 requests.
285 requests.
286
286
287 A new executor should be obtained for each distinct set of commands
287 A new executor should be obtained for each distinct set of commands
288 (possibly just a single command) that the consumer wants to execute
288 (possibly just a single command) that the consumer wants to execute
289 as part of a single operation or round trip. This is because some
289 as part of a single operation or round trip. This is because some
290 peers are half-duplex and/or don't support persistent connections.
290 peers are half-duplex and/or don't support persistent connections.
291 e.g. in the case of HTTP peers, commands sent to an executor represent
291 e.g. in the case of HTTP peers, commands sent to an executor represent
292 a single HTTP request. While some peers may support multiple command
292 a single HTTP request. While some peers may support multiple command
293 sends over the wire per executor, consumers need to code to the least
293 sends over the wire per executor, consumers need to code to the least
294 capable peer. So it should be assumed that command executors buffer
294 capable peer. So it should be assumed that command executors buffer
295 called commands until they are told to send them and that each
295 called commands until they are told to send them and that each
296 command executor could result in a new connection or wire-level request
296 command executor could result in a new connection or wire-level request
297 being issued.
297 being issued.
298 """
298 """
299
299
300 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
300 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
301 """Unified interface for peer repositories.
301 """Unified interface for peer repositories.
302
302
303 All peer instances must conform to this interface.
303 All peer instances must conform to this interface.
304 """
304 """
305
305
306 @interfaceutil.implementer(ipeerbase)
306 @interfaceutil.implementer(ipeerbase)
307 class peer(object):
307 class peer(object):
308 """Base class for peer repositories."""
308 """Base class for peer repositories."""
309
309
310 def capable(self, name):
310 def capable(self, name):
311 caps = self.capabilities()
311 caps = self.capabilities()
312 if name in caps:
312 if name in caps:
313 return True
313 return True
314
314
315 name = '%s=' % name
315 name = '%s=' % name
316 for cap in caps:
316 for cap in caps:
317 if cap.startswith(name):
317 if cap.startswith(name):
318 return cap[len(name):]
318 return cap[len(name):]
319
319
320 return False
320 return False
321
321
322 def requirecap(self, name, purpose):
322 def requirecap(self, name, purpose):
323 if self.capable(name):
323 if self.capable(name):
324 return
324 return
325
325
326 raise error.CapabilityError(
326 raise error.CapabilityError(
327 _('cannot %s; remote repository does not support the %r '
327 _('cannot %s; remote repository does not support the %r '
328 'capability') % (purpose, name))
328 'capability') % (purpose, name))
329
329
330 class iverifyproblem(interfaceutil.Interface):
330 class iverifyproblem(interfaceutil.Interface):
331 """Represents a problem with the integrity of the repository.
331 """Represents a problem with the integrity of the repository.
332
332
333 Instances of this interface are emitted to describe an integrity issue
333 Instances of this interface are emitted to describe an integrity issue
334 with a repository (e.g. corrupt storage, missing data, etc).
334 with a repository (e.g. corrupt storage, missing data, etc).
335
335
336 Instances are essentially messages associated with severity.
336 Instances are essentially messages associated with severity.
337 """
337 """
338 warning = interfaceutil.Attribute(
338 warning = interfaceutil.Attribute(
339 """Message indicating a non-fatal problem.""")
339 """Message indicating a non-fatal problem.""")
340
340
341 error = interfaceutil.Attribute(
341 error = interfaceutil.Attribute(
342 """Message indicating a fatal problem.""")
342 """Message indicating a fatal problem.""")
343
343
344 node = interfaceutil.Attribute(
344 node = interfaceutil.Attribute(
345 """Revision encountering the problem.
345 """Revision encountering the problem.
346
346
347 ``None`` means the problem doesn't apply to a single revision.
347 ``None`` means the problem doesn't apply to a single revision.
348 """)
348 """)
349
349
350 class irevisiondelta(interfaceutil.Interface):
350 class irevisiondelta(interfaceutil.Interface):
351 """Represents a delta between one revision and another.
351 """Represents a delta between one revision and another.
352
352
353 Instances convey enough information to allow a revision to be exchanged
353 Instances convey enough information to allow a revision to be exchanged
354 with another repository.
354 with another repository.
355
355
356 Instances represent the fulltext revision data or a delta against
356 Instances represent the fulltext revision data or a delta against
357 another revision. Therefore the ``revision`` and ``delta`` attributes
357 another revision. Therefore the ``revision`` and ``delta`` attributes
358 are mutually exclusive.
358 are mutually exclusive.
359
359
360 Typically used for changegroup generation.
360 Typically used for changegroup generation.
361 """
361 """
362
362
363 node = interfaceutil.Attribute(
363 node = interfaceutil.Attribute(
364 """20 byte node of this revision.""")
364 """20 byte node of this revision.""")
365
365
366 p1node = interfaceutil.Attribute(
366 p1node = interfaceutil.Attribute(
367 """20 byte node of 1st parent of this revision.""")
367 """20 byte node of 1st parent of this revision.""")
368
368
369 p2node = interfaceutil.Attribute(
369 p2node = interfaceutil.Attribute(
370 """20 byte node of 2nd parent of this revision.""")
370 """20 byte node of 2nd parent of this revision.""")
371
371
372 linknode = interfaceutil.Attribute(
372 linknode = interfaceutil.Attribute(
373 """20 byte node of the changelog revision this node is linked to.""")
373 """20 byte node of the changelog revision this node is linked to.""")
374
374
375 flags = interfaceutil.Attribute(
375 flags = interfaceutil.Attribute(
376 """2 bytes of integer flags that apply to this revision.""")
376 """2 bytes of integer flags that apply to this revision.""")
377
377
378 basenode = interfaceutil.Attribute(
378 basenode = interfaceutil.Attribute(
379 """20 byte node of the revision this data is a delta against.
379 """20 byte node of the revision this data is a delta against.
380
380
381 ``nullid`` indicates that the revision is a full revision and not
381 ``nullid`` indicates that the revision is a full revision and not
382 a delta.
382 a delta.
383 """)
383 """)
384
384
385 baserevisionsize = interfaceutil.Attribute(
385 baserevisionsize = interfaceutil.Attribute(
386 """Size of base revision this delta is against.
386 """Size of base revision this delta is against.
387
387
388 May be ``None`` if ``basenode`` is ``nullid``.
388 May be ``None`` if ``basenode`` is ``nullid``.
389 """)
389 """)
390
390
391 revision = interfaceutil.Attribute(
391 revision = interfaceutil.Attribute(
392 """Raw fulltext of revision data for this node.""")
392 """Raw fulltext of revision data for this node.""")
393
393
394 delta = interfaceutil.Attribute(
394 delta = interfaceutil.Attribute(
395 """Delta between ``basenode`` and ``node``.
395 """Delta between ``basenode`` and ``node``.
396
396
397 Stored in the bdiff delta format.
397 Stored in the bdiff delta format.
398 """)
398 """)
399
399
400 class ifilerevisionssequence(interfaceutil.Interface):
400 class ifilerevisionssequence(interfaceutil.Interface):
401 """Contains index data for all revisions of a file.
401 """Contains index data for all revisions of a file.
402
402
403 Types implementing this behave like lists of tuples. The index
403 Types implementing this behave like lists of tuples. The index
404 in the list corresponds to the revision number. The values contain
404 in the list corresponds to the revision number. The values contain
405 index metadata.
405 index metadata.
406
406
407 The *null* revision (revision number -1) is always the last item
407 The *null* revision (revision number -1) is always the last item
408 in the index.
408 in the index.
409 """
409 """
410
410
411 def __len__():
411 def __len__():
412 """The total number of revisions."""
412 """The total number of revisions."""
413
413
414 def __getitem__(rev):
414 def __getitem__(rev):
415 """Returns the object having a specific revision number.
415 """Returns the object having a specific revision number.
416
416
417 Returns an 8-tuple with the following fields:
417 Returns an 8-tuple with the following fields:
418
418
419 offset+flags
419 offset+flags
420 Contains the offset and flags for the revision. 64-bit unsigned
420 Contains the offset and flags for the revision. 64-bit unsigned
421 integer where first 6 bytes are the offset and the next 2 bytes
421 integer where first 6 bytes are the offset and the next 2 bytes
422 are flags. The offset can be 0 if it is not used by the store.
422 are flags. The offset can be 0 if it is not used by the store.
423 compressed size
423 compressed size
424 Size of the revision data in the store. It can be 0 if it isn't
424 Size of the revision data in the store. It can be 0 if it isn't
425 needed by the store.
425 needed by the store.
426 uncompressed size
426 uncompressed size
427 Fulltext size. It can be 0 if it isn't needed by the store.
427 Fulltext size. It can be 0 if it isn't needed by the store.
428 base revision
428 base revision
429 Revision number of revision the delta for storage is encoded
429 Revision number of revision the delta for storage is encoded
430 against. -1 indicates not encoded against a base revision.
430 against. -1 indicates not encoded against a base revision.
431 link revision
431 link revision
432 Revision number of changelog revision this entry is related to.
432 Revision number of changelog revision this entry is related to.
433 p1 revision
433 p1 revision
434 Revision number of 1st parent. -1 if no 1st parent.
434 Revision number of 1st parent. -1 if no 1st parent.
435 p2 revision
435 p2 revision
436 Revision number of 2nd parent. -1 if no 1st parent.
436 Revision number of 2nd parent. -1 if no 1st parent.
437 node
437 node
438 Binary node value for this revision number.
438 Binary node value for this revision number.
439
439
440 Negative values should index off the end of the sequence. ``-1``
440 Negative values should index off the end of the sequence. ``-1``
441 should return the null revision. ``-2`` should return the most
441 should return the null revision. ``-2`` should return the most
442 recent revision.
442 recent revision.
443 """
443 """
444
444
445 def __contains__(rev):
445 def __contains__(rev):
446 """Whether a revision number exists."""
446 """Whether a revision number exists."""
447
447
448 def insert(self, i, entry):
448 def insert(self, i, entry):
449 """Add an item to the index at specific revision."""
449 """Add an item to the index at specific revision."""
450
450
451 class ifileindex(interfaceutil.Interface):
451 class ifileindex(interfaceutil.Interface):
452 """Storage interface for index data of a single file.
452 """Storage interface for index data of a single file.
453
453
454 File storage data is divided into index metadata and data storage.
454 File storage data is divided into index metadata and data storage.
455 This interface defines the index portion of the interface.
455 This interface defines the index portion of the interface.
456
456
457 The index logically consists of:
457 The index logically consists of:
458
458
459 * A mapping between revision numbers and nodes.
459 * A mapping between revision numbers and nodes.
460 * DAG data (storing and querying the relationship between nodes).
460 * DAG data (storing and querying the relationship between nodes).
461 * Metadata to facilitate storage.
461 * Metadata to facilitate storage.
462 """
462 """
463 def __len__():
463 def __len__():
464 """Obtain the number of revisions stored for this file."""
464 """Obtain the number of revisions stored for this file."""
465
465
466 def __iter__():
466 def __iter__():
467 """Iterate over revision numbers for this file."""
467 """Iterate over revision numbers for this file."""
468
468
469 def revs(start=0, stop=None):
469 def revs(start=0, stop=None):
470 """Iterate over revision numbers for this file, with control."""
470 """Iterate over revision numbers for this file, with control."""
471
471
472 def parents(node):
472 def parents(node):
473 """Returns a 2-tuple of parent nodes for a revision.
473 """Returns a 2-tuple of parent nodes for a revision.
474
474
475 Values will be ``nullid`` if the parent is empty.
475 Values will be ``nullid`` if the parent is empty.
476 """
476 """
477
477
478 def parentrevs(rev):
478 def parentrevs(rev):
479 """Like parents() but operates on revision numbers."""
479 """Like parents() but operates on revision numbers."""
480
480
481 def rev(node):
481 def rev(node):
482 """Obtain the revision number given a node.
482 """Obtain the revision number given a node.
483
483
484 Raises ``error.LookupError`` if the node is not known.
484 Raises ``error.LookupError`` if the node is not known.
485 """
485 """
486
486
487 def node(rev):
487 def node(rev):
488 """Obtain the node value given a revision number.
488 """Obtain the node value given a revision number.
489
489
490 Raises ``IndexError`` if the node is not known.
490 Raises ``IndexError`` if the node is not known.
491 """
491 """
492
492
493 def lookup(node):
493 def lookup(node):
494 """Attempt to resolve a value to a node.
494 """Attempt to resolve a value to a node.
495
495
496 Value can be a binary node, hex node, revision number, or a string
496 Value can be a binary node, hex node, revision number, or a string
497 that can be converted to an integer.
497 that can be converted to an integer.
498
498
499 Raises ``error.LookupError`` if a node could not be resolved.
499 Raises ``error.LookupError`` if a node could not be resolved.
500 """
500 """
501
501
502 def linkrev(rev):
502 def linkrev(rev):
503 """Obtain the changeset revision number a revision is linked to."""
503 """Obtain the changeset revision number a revision is linked to."""
504
504
505 def iscensored(rev):
505 def iscensored(rev):
506 """Return whether a revision's content has been censored."""
506 """Return whether a revision's content has been censored."""
507
507
508 def commonancestorsheads(node1, node2):
508 def commonancestorsheads(node1, node2):
509 """Obtain an iterable of nodes containing heads of common ancestors.
509 """Obtain an iterable of nodes containing heads of common ancestors.
510
510
511 See ``ancestor.commonancestorsheads()``.
511 See ``ancestor.commonancestorsheads()``.
512 """
512 """
513
513
514 def descendants(revs):
514 def descendants(revs):
515 """Obtain descendant revision numbers for a set of revision numbers.
515 """Obtain descendant revision numbers for a set of revision numbers.
516
516
517 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
517 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
518 """
518 """
519
519
520 def heads(start=None, stop=None):
520 def heads(start=None, stop=None):
521 """Obtain a list of nodes that are DAG heads, with control.
521 """Obtain a list of nodes that are DAG heads, with control.
522
522
523 The set of revisions examined can be limited by specifying
523 The set of revisions examined can be limited by specifying
524 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
524 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
525 iterable of nodes. DAG traversal starts at earlier revision
525 iterable of nodes. DAG traversal starts at earlier revision
526 ``start`` and iterates forward until any node in ``stop`` is
526 ``start`` and iterates forward until any node in ``stop`` is
527 encountered.
527 encountered.
528 """
528 """
529
529
530 def children(node):
530 def children(node):
531 """Obtain nodes that are children of a node.
531 """Obtain nodes that are children of a node.
532
532
533 Returns a list of nodes.
533 Returns a list of nodes.
534 """
534 """
535
535
536 class ifiledata(interfaceutil.Interface):
536 class ifiledata(interfaceutil.Interface):
537 """Storage interface for data storage of a specific file.
537 """Storage interface for data storage of a specific file.
538
538
539 This complements ``ifileindex`` and provides an interface for accessing
539 This complements ``ifileindex`` and provides an interface for accessing
540 data for a tracked file.
540 data for a tracked file.
541 """
541 """
542 def size(rev):
542 def size(rev):
543 """Obtain the fulltext size of file data.
543 """Obtain the fulltext size of file data.
544
544
545 Any metadata is excluded from size measurements.
545 Any metadata is excluded from size measurements.
546 """
546 """
547
547
548 def checkhash(fulltext, node, p1=None, p2=None, rev=None):
548 def checkhash(fulltext, node, p1=None, p2=None, rev=None):
549 """Validate the stored hash of a given fulltext and node.
549 """Validate the stored hash of a given fulltext and node.
550
550
551 Raises ``error.StorageError`` is hash validation fails.
551 Raises ``error.StorageError`` is hash validation fails.
552 """
552 """
553
553
554 def revision(node, raw=False):
554 def revision(node, raw=False):
555 """"Obtain fulltext data for a node.
555 """"Obtain fulltext data for a node.
556
556
557 By default, any storage transformations are applied before the data
557 By default, any storage transformations are applied before the data
558 is returned. If ``raw`` is True, non-raw storage transformations
558 is returned. If ``raw`` is True, non-raw storage transformations
559 are not applied.
559 are not applied.
560
560
561 The fulltext data may contain a header containing metadata. Most
561 The fulltext data may contain a header containing metadata. Most
562 consumers should use ``read()`` to obtain the actual file data.
562 consumers should use ``read()`` to obtain the actual file data.
563 """
563 """
564
564
565 def read(node):
565 def read(node):
566 """Resolve file fulltext data.
566 """Resolve file fulltext data.
567
567
568 This is similar to ``revision()`` except any metadata in the data
568 This is similar to ``revision()`` except any metadata in the data
569 headers is stripped.
569 headers is stripped.
570 """
570 """
571
571
572 def renamed(node):
572 def renamed(node):
573 """Obtain copy metadata for a node.
573 """Obtain copy metadata for a node.
574
574
575 Returns ``False`` if no copy metadata is stored or a 2-tuple of
575 Returns ``False`` if no copy metadata is stored or a 2-tuple of
576 (path, node) from which this revision was copied.
576 (path, node) from which this revision was copied.
577 """
577 """
578
578
579 def cmp(node, fulltext):
579 def cmp(node, fulltext):
580 """Compare fulltext to another revision.
580 """Compare fulltext to another revision.
581
581
582 Returns True if the fulltext is different from what is stored.
582 Returns True if the fulltext is different from what is stored.
583
583
584 This takes copy metadata into account.
584 This takes copy metadata into account.
585
585
586 TODO better document the copy metadata and censoring logic.
586 TODO better document the copy metadata and censoring logic.
587 """
587 """
588
588
589 def revdiff(rev1, rev2):
590 """Obtain a delta between two revision numbers.
591
592 Operates on raw data in the store (``revision(node, raw=True)``).
593
594 The returned data is the result of ``bdiff.bdiff`` on the raw
595 revision data.
596 """
597
598 def emitrevisions(nodes,
589 def emitrevisions(nodes,
599 nodesorder=None,
590 nodesorder=None,
600 revisiondata=False,
591 revisiondata=False,
601 assumehaveparentrevisions=False,
592 assumehaveparentrevisions=False,
602 deltaprevious=False):
593 deltaprevious=False):
603 """Produce ``irevisiondelta`` for revisions.
594 """Produce ``irevisiondelta`` for revisions.
604
595
605 Given an iterable of nodes, emits objects conforming to the
596 Given an iterable of nodes, emits objects conforming to the
606 ``irevisiondelta`` interface that describe revisions in storage.
597 ``irevisiondelta`` interface that describe revisions in storage.
607
598
608 This method is a generator.
599 This method is a generator.
609
600
610 The input nodes may be unordered. Implementations must ensure that a
601 The input nodes may be unordered. Implementations must ensure that a
611 node's parents are emitted before the node itself. Transitively, this
602 node's parents are emitted before the node itself. Transitively, this
612 means that a node may only be emitted once all its ancestors in
603 means that a node may only be emitted once all its ancestors in
613 ``nodes`` have also been emitted.
604 ``nodes`` have also been emitted.
614
605
615 By default, emits "index" data (the ``node``, ``p1node``, and
606 By default, emits "index" data (the ``node``, ``p1node``, and
616 ``p2node`` attributes). If ``revisiondata`` is set, revision data
607 ``p2node`` attributes). If ``revisiondata`` is set, revision data
617 will also be present on the emitted objects.
608 will also be present on the emitted objects.
618
609
619 With default argument values, implementations can choose to emit
610 With default argument values, implementations can choose to emit
620 either fulltext revision data or a delta. When emitting deltas,
611 either fulltext revision data or a delta. When emitting deltas,
621 implementations must consider whether the delta's base revision
612 implementations must consider whether the delta's base revision
622 fulltext is available to the receiver.
613 fulltext is available to the receiver.
623
614
624 The base revision fulltext is guaranteed to be available if any of
615 The base revision fulltext is guaranteed to be available if any of
625 the following are met:
616 the following are met:
626
617
627 * Its fulltext revision was emitted by this method call.
618 * Its fulltext revision was emitted by this method call.
628 * A delta for that revision was emitted by this method call.
619 * A delta for that revision was emitted by this method call.
629 * ``assumehaveparentrevisions`` is True and the base revision is a
620 * ``assumehaveparentrevisions`` is True and the base revision is a
630 parent of the node.
621 parent of the node.
631
622
632 ``nodesorder`` can be used to control the order that revisions are
623 ``nodesorder`` can be used to control the order that revisions are
633 emitted. By default, revisions can be reordered as long as they are
624 emitted. By default, revisions can be reordered as long as they are
634 in DAG topological order (see above). If the value is ``nodes``,
625 in DAG topological order (see above). If the value is ``nodes``,
635 the iteration order from ``nodes`` should be used. If the value is
626 the iteration order from ``nodes`` should be used. If the value is
636 ``storage``, then the native order from the backing storage layer
627 ``storage``, then the native order from the backing storage layer
637 is used. (Not all storage layers will have strong ordering and behavior
628 is used. (Not all storage layers will have strong ordering and behavior
638 of this mode is storage-dependent.) ``nodes`` ordering can force
629 of this mode is storage-dependent.) ``nodes`` ordering can force
639 revisions to be emitted before their ancestors, so consumers should
630 revisions to be emitted before their ancestors, so consumers should
640 use it with care.
631 use it with care.
641
632
642 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
633 The ``linknode`` attribute on the returned ``irevisiondelta`` may not
643 be set and it is the caller's responsibility to resolve it, if needed.
634 be set and it is the caller's responsibility to resolve it, if needed.
644
635
645 If ``deltaprevious`` is True and revision data is requested, all
636 If ``deltaprevious`` is True and revision data is requested, all
646 revision data should be emitted as deltas against the revision
637 revision data should be emitted as deltas against the revision
647 emitted just prior. The initial revision should be a delta against
638 emitted just prior. The initial revision should be a delta against
648 its 1st parent.
639 its 1st parent.
649 """
640 """
650
641
651 class ifilemutation(interfaceutil.Interface):
642 class ifilemutation(interfaceutil.Interface):
652 """Storage interface for mutation events of a tracked file."""
643 """Storage interface for mutation events of a tracked file."""
653
644
654 def add(filedata, meta, transaction, linkrev, p1, p2):
645 def add(filedata, meta, transaction, linkrev, p1, p2):
655 """Add a new revision to the store.
646 """Add a new revision to the store.
656
647
657 Takes file data, dictionary of metadata, a transaction, linkrev,
648 Takes file data, dictionary of metadata, a transaction, linkrev,
658 and parent nodes.
649 and parent nodes.
659
650
660 Returns the node that was added.
651 Returns the node that was added.
661
652
662 May no-op if a revision matching the supplied data is already stored.
653 May no-op if a revision matching the supplied data is already stored.
663 """
654 """
664
655
665 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
656 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
666 flags=0, cachedelta=None):
657 flags=0, cachedelta=None):
667 """Add a new revision to the store.
658 """Add a new revision to the store.
668
659
669 This is similar to ``add()`` except it operates at a lower level.
660 This is similar to ``add()`` except it operates at a lower level.
670
661
671 The data passed in already contains a metadata header, if any.
662 The data passed in already contains a metadata header, if any.
672
663
673 ``node`` and ``flags`` can be used to define the expected node and
664 ``node`` and ``flags`` can be used to define the expected node and
674 the flags to use with storage.
665 the flags to use with storage.
675
666
676 ``add()`` is usually called when adding files from e.g. the working
667 ``add()`` is usually called when adding files from e.g. the working
677 directory. ``addrevision()`` is often called by ``add()`` and for
668 directory. ``addrevision()`` is often called by ``add()`` and for
678 scenarios where revision data has already been computed, such as when
669 scenarios where revision data has already been computed, such as when
679 applying raw data from a peer repo.
670 applying raw data from a peer repo.
680 """
671 """
681
672
682 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
673 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
683 """Process a series of deltas for storage.
674 """Process a series of deltas for storage.
684
675
685 ``deltas`` is an iterable of 7-tuples of
676 ``deltas`` is an iterable of 7-tuples of
686 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
677 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
687 to add.
678 to add.
688
679
689 The ``delta`` field contains ``mpatch`` data to apply to a base
680 The ``delta`` field contains ``mpatch`` data to apply to a base
690 revision, identified by ``deltabase``. The base node can be
681 revision, identified by ``deltabase``. The base node can be
691 ``nullid``, in which case the header from the delta can be ignored
682 ``nullid``, in which case the header from the delta can be ignored
692 and the delta used as the fulltext.
683 and the delta used as the fulltext.
693
684
694 ``addrevisioncb`` should be called for each node as it is committed.
685 ``addrevisioncb`` should be called for each node as it is committed.
695
686
696 Returns a list of nodes that were processed. A node will be in the list
687 Returns a list of nodes that were processed. A node will be in the list
697 even if it existed in the store previously.
688 even if it existed in the store previously.
698 """
689 """
699
690
700 def censorrevision(tr, node, tombstone=b''):
691 def censorrevision(tr, node, tombstone=b''):
701 """Remove the content of a single revision.
692 """Remove the content of a single revision.
702
693
703 The specified ``node`` will have its content purged from storage.
694 The specified ``node`` will have its content purged from storage.
704 Future attempts to access the revision data for this node will
695 Future attempts to access the revision data for this node will
705 result in failure.
696 result in failure.
706
697
707 A ``tombstone`` message can optionally be stored. This message may be
698 A ``tombstone`` message can optionally be stored. This message may be
708 displayed to users when they attempt to access the missing revision
699 displayed to users when they attempt to access the missing revision
709 data.
700 data.
710
701
711 Storage backends may have stored deltas against the previous content
702 Storage backends may have stored deltas against the previous content
712 in this revision. As part of censoring a revision, these storage
703 in this revision. As part of censoring a revision, these storage
713 backends are expected to rewrite any internally stored deltas such
704 backends are expected to rewrite any internally stored deltas such
714 that they no longer reference the deleted content.
705 that they no longer reference the deleted content.
715 """
706 """
716
707
717 def getstrippoint(minlink):
708 def getstrippoint(minlink):
718 """Find the minimum revision that must be stripped to strip a linkrev.
709 """Find the minimum revision that must be stripped to strip a linkrev.
719
710
720 Returns a 2-tuple containing the minimum revision number and a set
711 Returns a 2-tuple containing the minimum revision number and a set
721 of all revisions numbers that would be broken by this strip.
712 of all revisions numbers that would be broken by this strip.
722
713
723 TODO this is highly revlog centric and should be abstracted into
714 TODO this is highly revlog centric and should be abstracted into
724 a higher-level deletion API. ``repair.strip()`` relies on this.
715 a higher-level deletion API. ``repair.strip()`` relies on this.
725 """
716 """
726
717
727 def strip(minlink, transaction):
718 def strip(minlink, transaction):
728 """Remove storage of items starting at a linkrev.
719 """Remove storage of items starting at a linkrev.
729
720
730 This uses ``getstrippoint()`` to determine the first node to remove.
721 This uses ``getstrippoint()`` to determine the first node to remove.
731 Then it effectively truncates storage for all revisions after that.
722 Then it effectively truncates storage for all revisions after that.
732
723
733 TODO this is highly revlog centric and should be abstracted into a
724 TODO this is highly revlog centric and should be abstracted into a
734 higher-level deletion API.
725 higher-level deletion API.
735 """
726 """
736
727
737 class ifilestorage(ifileindex, ifiledata, ifilemutation):
728 class ifilestorage(ifileindex, ifiledata, ifilemutation):
738 """Complete storage interface for a single tracked file."""
729 """Complete storage interface for a single tracked file."""
739
730
740 def files():
731 def files():
741 """Obtain paths that are backing storage for this file.
732 """Obtain paths that are backing storage for this file.
742
733
743 TODO this is used heavily by verify code and there should probably
734 TODO this is used heavily by verify code and there should probably
744 be a better API for that.
735 be a better API for that.
745 """
736 """
746
737
747 def storageinfo(exclusivefiles=False, sharedfiles=False,
738 def storageinfo(exclusivefiles=False, sharedfiles=False,
748 revisionscount=False, trackedsize=False,
739 revisionscount=False, trackedsize=False,
749 storedsize=False):
740 storedsize=False):
750 """Obtain information about storage for this file's data.
741 """Obtain information about storage for this file's data.
751
742
752 Returns a dict describing storage for this tracked path. The keys
743 Returns a dict describing storage for this tracked path. The keys
753 in the dict map to arguments of the same. The arguments are bools
744 in the dict map to arguments of the same. The arguments are bools
754 indicating whether to calculate and obtain that data.
745 indicating whether to calculate and obtain that data.
755
746
756 exclusivefiles
747 exclusivefiles
757 Iterable of (vfs, path) describing files that are exclusively
748 Iterable of (vfs, path) describing files that are exclusively
758 used to back storage for this tracked path.
749 used to back storage for this tracked path.
759
750
760 sharedfiles
751 sharedfiles
761 Iterable of (vfs, path) describing files that are used to back
752 Iterable of (vfs, path) describing files that are used to back
762 storage for this tracked path. Those files may also provide storage
753 storage for this tracked path. Those files may also provide storage
763 for other stored entities.
754 for other stored entities.
764
755
765 revisionscount
756 revisionscount
766 Number of revisions available for retrieval.
757 Number of revisions available for retrieval.
767
758
768 trackedsize
759 trackedsize
769 Total size in bytes of all tracked revisions. This is a sum of the
760 Total size in bytes of all tracked revisions. This is a sum of the
770 length of the fulltext of all revisions.
761 length of the fulltext of all revisions.
771
762
772 storedsize
763 storedsize
773 Total size in bytes used to store data for all tracked revisions.
764 Total size in bytes used to store data for all tracked revisions.
774 This is commonly less than ``trackedsize`` due to internal usage
765 This is commonly less than ``trackedsize`` due to internal usage
775 of deltas rather than fulltext revisions.
766 of deltas rather than fulltext revisions.
776
767
777 Not all storage backends may support all queries are have a reasonable
768 Not all storage backends may support all queries are have a reasonable
778 value to use. In that case, the value should be set to ``None`` and
769 value to use. In that case, the value should be set to ``None`` and
779 callers are expected to handle this special value.
770 callers are expected to handle this special value.
780 """
771 """
781
772
782 def verifyintegrity(state):
773 def verifyintegrity(state):
783 """Verifies the integrity of file storage.
774 """Verifies the integrity of file storage.
784
775
785 ``state`` is a dict holding state of the verifier process. It can be
776 ``state`` is a dict holding state of the verifier process. It can be
786 used to communicate data between invocations of multiple storage
777 used to communicate data between invocations of multiple storage
787 primitives.
778 primitives.
788
779
789 If individual revisions cannot have their revision content resolved,
780 If individual revisions cannot have their revision content resolved,
790 the method is expected to set the ``skipread`` key to a set of nodes
781 the method is expected to set the ``skipread`` key to a set of nodes
791 that encountered problems.
782 that encountered problems.
792
783
793 The method yields objects conforming to the ``iverifyproblem``
784 The method yields objects conforming to the ``iverifyproblem``
794 interface.
785 interface.
795 """
786 """
796
787
797 class idirs(interfaceutil.Interface):
788 class idirs(interfaceutil.Interface):
798 """Interface representing a collection of directories from paths.
789 """Interface representing a collection of directories from paths.
799
790
800 This interface is essentially a derived data structure representing
791 This interface is essentially a derived data structure representing
801 directories from a collection of paths.
792 directories from a collection of paths.
802 """
793 """
803
794
804 def addpath(path):
795 def addpath(path):
805 """Add a path to the collection.
796 """Add a path to the collection.
806
797
807 All directories in the path will be added to the collection.
798 All directories in the path will be added to the collection.
808 """
799 """
809
800
810 def delpath(path):
801 def delpath(path):
811 """Remove a path from the collection.
802 """Remove a path from the collection.
812
803
813 If the removal was the last path in a particular directory, the
804 If the removal was the last path in a particular directory, the
814 directory is removed from the collection.
805 directory is removed from the collection.
815 """
806 """
816
807
817 def __iter__():
808 def __iter__():
818 """Iterate over the directories in this collection of paths."""
809 """Iterate over the directories in this collection of paths."""
819
810
820 def __contains__(path):
811 def __contains__(path):
821 """Whether a specific directory is in this collection."""
812 """Whether a specific directory is in this collection."""
822
813
823 class imanifestdict(interfaceutil.Interface):
814 class imanifestdict(interfaceutil.Interface):
824 """Interface representing a manifest data structure.
815 """Interface representing a manifest data structure.
825
816
826 A manifest is effectively a dict mapping paths to entries. Each entry
817 A manifest is effectively a dict mapping paths to entries. Each entry
827 consists of a binary node and extra flags affecting that entry.
818 consists of a binary node and extra flags affecting that entry.
828 """
819 """
829
820
830 def __getitem__(path):
821 def __getitem__(path):
831 """Returns the binary node value for a path in the manifest.
822 """Returns the binary node value for a path in the manifest.
832
823
833 Raises ``KeyError`` if the path does not exist in the manifest.
824 Raises ``KeyError`` if the path does not exist in the manifest.
834
825
835 Equivalent to ``self.find(path)[0]``.
826 Equivalent to ``self.find(path)[0]``.
836 """
827 """
837
828
838 def find(path):
829 def find(path):
839 """Returns the entry for a path in the manifest.
830 """Returns the entry for a path in the manifest.
840
831
841 Returns a 2-tuple of (node, flags).
832 Returns a 2-tuple of (node, flags).
842
833
843 Raises ``KeyError`` if the path does not exist in the manifest.
834 Raises ``KeyError`` if the path does not exist in the manifest.
844 """
835 """
845
836
846 def __len__():
837 def __len__():
847 """Return the number of entries in the manifest."""
838 """Return the number of entries in the manifest."""
848
839
849 def __nonzero__():
840 def __nonzero__():
850 """Returns True if the manifest has entries, False otherwise."""
841 """Returns True if the manifest has entries, False otherwise."""
851
842
852 __bool__ = __nonzero__
843 __bool__ = __nonzero__
853
844
854 def __setitem__(path, node):
845 def __setitem__(path, node):
855 """Define the node value for a path in the manifest.
846 """Define the node value for a path in the manifest.
856
847
857 If the path is already in the manifest, its flags will be copied to
848 If the path is already in the manifest, its flags will be copied to
858 the new entry.
849 the new entry.
859 """
850 """
860
851
861 def __contains__(path):
852 def __contains__(path):
862 """Whether a path exists in the manifest."""
853 """Whether a path exists in the manifest."""
863
854
864 def __delitem__(path):
855 def __delitem__(path):
865 """Remove a path from the manifest.
856 """Remove a path from the manifest.
866
857
867 Raises ``KeyError`` if the path is not in the manifest.
858 Raises ``KeyError`` if the path is not in the manifest.
868 """
859 """
869
860
870 def __iter__():
861 def __iter__():
871 """Iterate over paths in the manifest."""
862 """Iterate over paths in the manifest."""
872
863
873 def iterkeys():
864 def iterkeys():
874 """Iterate over paths in the manifest."""
865 """Iterate over paths in the manifest."""
875
866
876 def keys():
867 def keys():
877 """Obtain a list of paths in the manifest."""
868 """Obtain a list of paths in the manifest."""
878
869
879 def filesnotin(other, match=None):
870 def filesnotin(other, match=None):
880 """Obtain the set of paths in this manifest but not in another.
871 """Obtain the set of paths in this manifest but not in another.
881
872
882 ``match`` is an optional matcher function to be applied to both
873 ``match`` is an optional matcher function to be applied to both
883 manifests.
874 manifests.
884
875
885 Returns a set of paths.
876 Returns a set of paths.
886 """
877 """
887
878
888 def dirs():
879 def dirs():
889 """Returns an object implementing the ``idirs`` interface."""
880 """Returns an object implementing the ``idirs`` interface."""
890
881
891 def hasdir(dir):
882 def hasdir(dir):
892 """Returns a bool indicating if a directory is in this manifest."""
883 """Returns a bool indicating if a directory is in this manifest."""
893
884
894 def matches(match):
885 def matches(match):
895 """Generate a new manifest filtered through a matcher.
886 """Generate a new manifest filtered through a matcher.
896
887
897 Returns an object conforming to the ``imanifestdict`` interface.
888 Returns an object conforming to the ``imanifestdict`` interface.
898 """
889 """
899
890
900 def walk(match):
891 def walk(match):
901 """Generator of paths in manifest satisfying a matcher.
892 """Generator of paths in manifest satisfying a matcher.
902
893
903 This is equivalent to ``self.matches(match).iterkeys()`` except a new
894 This is equivalent to ``self.matches(match).iterkeys()`` except a new
904 manifest object is not created.
895 manifest object is not created.
905
896
906 If the matcher has explicit files listed and they don't exist in
897 If the matcher has explicit files listed and they don't exist in
907 the manifest, ``match.bad()`` is called for each missing file.
898 the manifest, ``match.bad()`` is called for each missing file.
908 """
899 """
909
900
910 def diff(other, match=None, clean=False):
901 def diff(other, match=None, clean=False):
911 """Find differences between this manifest and another.
902 """Find differences between this manifest and another.
912
903
913 This manifest is compared to ``other``.
904 This manifest is compared to ``other``.
914
905
915 If ``match`` is provided, the two manifests are filtered against this
906 If ``match`` is provided, the two manifests are filtered against this
916 matcher and only entries satisfying the matcher are compared.
907 matcher and only entries satisfying the matcher are compared.
917
908
918 If ``clean`` is True, unchanged files are included in the returned
909 If ``clean`` is True, unchanged files are included in the returned
919 object.
910 object.
920
911
921 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
912 Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
922 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
913 the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
923 represents the node and flags for this manifest and ``(node2, flag2)``
914 represents the node and flags for this manifest and ``(node2, flag2)``
924 are the same for the other manifest.
915 are the same for the other manifest.
925 """
916 """
926
917
927 def setflag(path, flag):
918 def setflag(path, flag):
928 """Set the flag value for a given path.
919 """Set the flag value for a given path.
929
920
930 Raises ``KeyError`` if the path is not already in the manifest.
921 Raises ``KeyError`` if the path is not already in the manifest.
931 """
922 """
932
923
933 def get(path, default=None):
924 def get(path, default=None):
934 """Obtain the node value for a path or a default value if missing."""
925 """Obtain the node value for a path or a default value if missing."""
935
926
936 def flags(path, default=''):
927 def flags(path, default=''):
937 """Return the flags value for a path or a default value if missing."""
928 """Return the flags value for a path or a default value if missing."""
938
929
939 def copy():
930 def copy():
940 """Return a copy of this manifest."""
931 """Return a copy of this manifest."""
941
932
942 def items():
933 def items():
943 """Returns an iterable of (path, node) for items in this manifest."""
934 """Returns an iterable of (path, node) for items in this manifest."""
944
935
945 def iteritems():
936 def iteritems():
946 """Identical to items()."""
937 """Identical to items()."""
947
938
948 def iterentries():
939 def iterentries():
949 """Returns an iterable of (path, node, flags) for this manifest.
940 """Returns an iterable of (path, node, flags) for this manifest.
950
941
951 Similar to ``iteritems()`` except items are a 3-tuple and include
942 Similar to ``iteritems()`` except items are a 3-tuple and include
952 flags.
943 flags.
953 """
944 """
954
945
955 def text():
946 def text():
956 """Obtain the raw data representation for this manifest.
947 """Obtain the raw data representation for this manifest.
957
948
958 Result is used to create a manifest revision.
949 Result is used to create a manifest revision.
959 """
950 """
960
951
961 def fastdelta(base, changes):
952 def fastdelta(base, changes):
962 """Obtain a delta between this manifest and another given changes.
953 """Obtain a delta between this manifest and another given changes.
963
954
964 ``base`` in the raw data representation for another manifest.
955 ``base`` in the raw data representation for another manifest.
965
956
966 ``changes`` is an iterable of ``(path, to_delete)``.
957 ``changes`` is an iterable of ``(path, to_delete)``.
967
958
968 Returns a 2-tuple containing ``bytearray(self.text())`` and the
959 Returns a 2-tuple containing ``bytearray(self.text())`` and the
969 delta between ``base`` and this manifest.
960 delta between ``base`` and this manifest.
970 """
961 """
971
962
972 class imanifestrevisionbase(interfaceutil.Interface):
963 class imanifestrevisionbase(interfaceutil.Interface):
973 """Base interface representing a single revision of a manifest.
964 """Base interface representing a single revision of a manifest.
974
965
975 Should not be used as a primary interface: should always be inherited
966 Should not be used as a primary interface: should always be inherited
976 as part of a larger interface.
967 as part of a larger interface.
977 """
968 """
978
969
979 def new():
970 def new():
980 """Obtain a new manifest instance.
971 """Obtain a new manifest instance.
981
972
982 Returns an object conforming to the ``imanifestrevisionwritable``
973 Returns an object conforming to the ``imanifestrevisionwritable``
983 interface. The instance will be associated with the same
974 interface. The instance will be associated with the same
984 ``imanifestlog`` collection as this instance.
975 ``imanifestlog`` collection as this instance.
985 """
976 """
986
977
987 def copy():
978 def copy():
988 """Obtain a copy of this manifest instance.
979 """Obtain a copy of this manifest instance.
989
980
990 Returns an object conforming to the ``imanifestrevisionwritable``
981 Returns an object conforming to the ``imanifestrevisionwritable``
991 interface. The instance will be associated with the same
982 interface. The instance will be associated with the same
992 ``imanifestlog`` collection as this instance.
983 ``imanifestlog`` collection as this instance.
993 """
984 """
994
985
995 def read():
986 def read():
996 """Obtain the parsed manifest data structure.
987 """Obtain the parsed manifest data structure.
997
988
998 The returned object conforms to the ``imanifestdict`` interface.
989 The returned object conforms to the ``imanifestdict`` interface.
999 """
990 """
1000
991
1001 class imanifestrevisionstored(imanifestrevisionbase):
992 class imanifestrevisionstored(imanifestrevisionbase):
1002 """Interface representing a manifest revision committed to storage."""
993 """Interface representing a manifest revision committed to storage."""
1003
994
1004 def node():
995 def node():
1005 """The binary node for this manifest."""
996 """The binary node for this manifest."""
1006
997
1007 parents = interfaceutil.Attribute(
998 parents = interfaceutil.Attribute(
1008 """List of binary nodes that are parents for this manifest revision."""
999 """List of binary nodes that are parents for this manifest revision."""
1009 )
1000 )
1010
1001
1011 def readdelta(shallow=False):
1002 def readdelta(shallow=False):
1012 """Obtain the manifest data structure representing changes from parent.
1003 """Obtain the manifest data structure representing changes from parent.
1013
1004
1014 This manifest is compared to its 1st parent. A new manifest representing
1005 This manifest is compared to its 1st parent. A new manifest representing
1015 those differences is constructed.
1006 those differences is constructed.
1016
1007
1017 The returned object conforms to the ``imanifestdict`` interface.
1008 The returned object conforms to the ``imanifestdict`` interface.
1018 """
1009 """
1019
1010
1020 def readfast(shallow=False):
1011 def readfast(shallow=False):
1021 """Calls either ``read()`` or ``readdelta()``.
1012 """Calls either ``read()`` or ``readdelta()``.
1022
1013
1023 The faster of the two options is called.
1014 The faster of the two options is called.
1024 """
1015 """
1025
1016
1026 def find(key):
1017 def find(key):
1027 """Calls self.read().find(key)``.
1018 """Calls self.read().find(key)``.
1028
1019
1029 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1020 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1030 """
1021 """
1031
1022
1032 class imanifestrevisionwritable(imanifestrevisionbase):
1023 class imanifestrevisionwritable(imanifestrevisionbase):
1033 """Interface representing a manifest revision that can be committed."""
1024 """Interface representing a manifest revision that can be committed."""
1034
1025
1035 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1026 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1036 """Add this revision to storage.
1027 """Add this revision to storage.
1037
1028
1038 Takes a transaction object, the changeset revision number it will
1029 Takes a transaction object, the changeset revision number it will
1039 be associated with, its parent nodes, and lists of added and
1030 be associated with, its parent nodes, and lists of added and
1040 removed paths.
1031 removed paths.
1041
1032
1042 If match is provided, storage can choose not to inspect or write out
1033 If match is provided, storage can choose not to inspect or write out
1043 items that do not match. Storage is still required to be able to provide
1034 items that do not match. Storage is still required to be able to provide
1044 the full manifest in the future for any directories written (these
1035 the full manifest in the future for any directories written (these
1045 manifests should not be "narrowed on disk").
1036 manifests should not be "narrowed on disk").
1046
1037
1047 Returns the binary node of the created revision.
1038 Returns the binary node of the created revision.
1048 """
1039 """
1049
1040
1050 class imanifeststorage(interfaceutil.Interface):
1041 class imanifeststorage(interfaceutil.Interface):
1051 """Storage interface for manifest data."""
1042 """Storage interface for manifest data."""
1052
1043
1053 tree = interfaceutil.Attribute(
1044 tree = interfaceutil.Attribute(
1054 """The path to the directory this manifest tracks.
1045 """The path to the directory this manifest tracks.
1055
1046
1056 The empty bytestring represents the root manifest.
1047 The empty bytestring represents the root manifest.
1057 """)
1048 """)
1058
1049
1059 index = interfaceutil.Attribute(
1050 index = interfaceutil.Attribute(
1060 """An ``ifilerevisionssequence`` instance.""")
1051 """An ``ifilerevisionssequence`` instance.""")
1061
1052
1062 indexfile = interfaceutil.Attribute(
1053 indexfile = interfaceutil.Attribute(
1063 """Path of revlog index file.
1054 """Path of revlog index file.
1064
1055
1065 TODO this is revlog specific and should not be exposed.
1056 TODO this is revlog specific and should not be exposed.
1066 """)
1057 """)
1067
1058
1068 opener = interfaceutil.Attribute(
1059 opener = interfaceutil.Attribute(
1069 """VFS opener to use to access underlying files used for storage.
1060 """VFS opener to use to access underlying files used for storage.
1070
1061
1071 TODO this is revlog specific and should not be exposed.
1062 TODO this is revlog specific and should not be exposed.
1072 """)
1063 """)
1073
1064
1074 version = interfaceutil.Attribute(
1065 version = interfaceutil.Attribute(
1075 """Revlog version number.
1066 """Revlog version number.
1076
1067
1077 TODO this is revlog specific and should not be exposed.
1068 TODO this is revlog specific and should not be exposed.
1078 """)
1069 """)
1079
1070
1080 _generaldelta = interfaceutil.Attribute(
1071 _generaldelta = interfaceutil.Attribute(
1081 """Whether generaldelta storage is being used.
1072 """Whether generaldelta storage is being used.
1082
1073
1083 TODO this is revlog specific and should not be exposed.
1074 TODO this is revlog specific and should not be exposed.
1084 """)
1075 """)
1085
1076
1086 fulltextcache = interfaceutil.Attribute(
1077 fulltextcache = interfaceutil.Attribute(
1087 """Dict with cache of fulltexts.
1078 """Dict with cache of fulltexts.
1088
1079
1089 TODO this doesn't feel appropriate for the storage interface.
1080 TODO this doesn't feel appropriate for the storage interface.
1090 """)
1081 """)
1091
1082
1092 def __len__():
1083 def __len__():
1093 """Obtain the number of revisions stored for this manifest."""
1084 """Obtain the number of revisions stored for this manifest."""
1094
1085
1095 def __iter__():
1086 def __iter__():
1096 """Iterate over revision numbers for this manifest."""
1087 """Iterate over revision numbers for this manifest."""
1097
1088
1098 def rev(node):
1089 def rev(node):
1099 """Obtain the revision number given a binary node.
1090 """Obtain the revision number given a binary node.
1100
1091
1101 Raises ``error.LookupError`` if the node is not known.
1092 Raises ``error.LookupError`` if the node is not known.
1102 """
1093 """
1103
1094
1104 def node(rev):
1095 def node(rev):
1105 """Obtain the node value given a revision number.
1096 """Obtain the node value given a revision number.
1106
1097
1107 Raises ``error.LookupError`` if the revision is not known.
1098 Raises ``error.LookupError`` if the revision is not known.
1108 """
1099 """
1109
1100
1110 def lookup(value):
1101 def lookup(value):
1111 """Attempt to resolve a value to a node.
1102 """Attempt to resolve a value to a node.
1112
1103
1113 Value can be a binary node, hex node, revision number, or a bytes
1104 Value can be a binary node, hex node, revision number, or a bytes
1114 that can be converted to an integer.
1105 that can be converted to an integer.
1115
1106
1116 Raises ``error.LookupError`` if a ndoe could not be resolved.
1107 Raises ``error.LookupError`` if a ndoe could not be resolved.
1117
1108
1118 TODO this is only used by debug* commands and can probably be deleted
1109 TODO this is only used by debug* commands and can probably be deleted
1119 easily.
1110 easily.
1120 """
1111 """
1121
1112
1122 def parents(node):
1113 def parents(node):
1123 """Returns a 2-tuple of parent nodes for a node.
1114 """Returns a 2-tuple of parent nodes for a node.
1124
1115
1125 Values will be ``nullid`` if the parent is empty.
1116 Values will be ``nullid`` if the parent is empty.
1126 """
1117 """
1127
1118
1128 def parentrevs(rev):
1119 def parentrevs(rev):
1129 """Like parents() but operates on revision numbers."""
1120 """Like parents() but operates on revision numbers."""
1130
1121
1131 def linkrev(rev):
1122 def linkrev(rev):
1132 """Obtain the changeset revision number a revision is linked to."""
1123 """Obtain the changeset revision number a revision is linked to."""
1133
1124
1134 def revision(node, _df=None, raw=False):
1125 def revision(node, _df=None, raw=False):
1135 """Obtain fulltext data for a node."""
1126 """Obtain fulltext data for a node."""
1136
1127
1137 def revdiff(rev1, rev2):
1128 def revdiff(rev1, rev2):
1138 """Obtain a delta between two revision numbers.
1129 """Obtain a delta between two revision numbers.
1139
1130
1140 The returned data is the result of ``bdiff.bdiff()`` on the raw
1131 The returned data is the result of ``bdiff.bdiff()`` on the raw
1141 revision data.
1132 revision data.
1142 """
1133 """
1143
1134
1144 def cmp(node, fulltext):
1135 def cmp(node, fulltext):
1145 """Compare fulltext to another revision.
1136 """Compare fulltext to another revision.
1146
1137
1147 Returns True if the fulltext is different from what is stored.
1138 Returns True if the fulltext is different from what is stored.
1148 """
1139 """
1149
1140
1150 def emitrevisions(nodes,
1141 def emitrevisions(nodes,
1151 nodesorder=None,
1142 nodesorder=None,
1152 revisiondata=False,
1143 revisiondata=False,
1153 assumehaveparentrevisions=False):
1144 assumehaveparentrevisions=False):
1154 """Produce ``irevisiondelta`` describing revisions.
1145 """Produce ``irevisiondelta`` describing revisions.
1155
1146
1156 See the documentation for ``ifiledata`` for more.
1147 See the documentation for ``ifiledata`` for more.
1157 """
1148 """
1158
1149
1159 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1150 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1160 """Process a series of deltas for storage.
1151 """Process a series of deltas for storage.
1161
1152
1162 See the documentation in ``ifilemutation`` for more.
1153 See the documentation in ``ifilemutation`` for more.
1163 """
1154 """
1164
1155
1165 def rawsize(rev):
1156 def rawsize(rev):
1166 """Obtain the size of tracked data.
1157 """Obtain the size of tracked data.
1167
1158
1168 Is equivalent to ``len(m.revision(node, raw=True))``.
1159 Is equivalent to ``len(m.revision(node, raw=True))``.
1169
1160
1170 TODO this method is only used by upgrade code and may be removed.
1161 TODO this method is only used by upgrade code and may be removed.
1171 """
1162 """
1172
1163
1173 def getstrippoint(minlink):
1164 def getstrippoint(minlink):
1174 """Find minimum revision that must be stripped to strip a linkrev.
1165 """Find minimum revision that must be stripped to strip a linkrev.
1175
1166
1176 See the documentation in ``ifilemutation`` for more.
1167 See the documentation in ``ifilemutation`` for more.
1177 """
1168 """
1178
1169
1179 def strip(minlink, transaction):
1170 def strip(minlink, transaction):
1180 """Remove storage of items starting at a linkrev.
1171 """Remove storage of items starting at a linkrev.
1181
1172
1182 See the documentation in ``ifilemutation`` for more.
1173 See the documentation in ``ifilemutation`` for more.
1183 """
1174 """
1184
1175
1185 def checksize():
1176 def checksize():
1186 """Obtain the expected sizes of backing files.
1177 """Obtain the expected sizes of backing files.
1187
1178
1188 TODO this is used by verify and it should not be part of the interface.
1179 TODO this is used by verify and it should not be part of the interface.
1189 """
1180 """
1190
1181
1191 def files():
1182 def files():
1192 """Obtain paths that are backing storage for this manifest.
1183 """Obtain paths that are backing storage for this manifest.
1193
1184
1194 TODO this is used by verify and there should probably be a better API
1185 TODO this is used by verify and there should probably be a better API
1195 for this functionality.
1186 for this functionality.
1196 """
1187 """
1197
1188
1198 def deltaparent(rev):
1189 def deltaparent(rev):
1199 """Obtain the revision that a revision is delta'd against.
1190 """Obtain the revision that a revision is delta'd against.
1200
1191
1201 TODO delta encoding is an implementation detail of storage and should
1192 TODO delta encoding is an implementation detail of storage and should
1202 not be exposed to the storage interface.
1193 not be exposed to the storage interface.
1203 """
1194 """
1204
1195
1205 def clone(tr, dest, **kwargs):
1196 def clone(tr, dest, **kwargs):
1206 """Clone this instance to another."""
1197 """Clone this instance to another."""
1207
1198
1208 def clearcaches(clear_persisted_data=False):
1199 def clearcaches(clear_persisted_data=False):
1209 """Clear any caches associated with this instance."""
1200 """Clear any caches associated with this instance."""
1210
1201
1211 def dirlog(d):
1202 def dirlog(d):
1212 """Obtain a manifest storage instance for a tree."""
1203 """Obtain a manifest storage instance for a tree."""
1213
1204
1214 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1205 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1215 match=None):
1206 match=None):
1216 """Add a revision to storage.
1207 """Add a revision to storage.
1217
1208
1218 ``m`` is an object conforming to ``imanifestdict``.
1209 ``m`` is an object conforming to ``imanifestdict``.
1219
1210
1220 ``link`` is the linkrev revision number.
1211 ``link`` is the linkrev revision number.
1221
1212
1222 ``p1`` and ``p2`` are the parent revision numbers.
1213 ``p1`` and ``p2`` are the parent revision numbers.
1223
1214
1224 ``added`` and ``removed`` are iterables of added and removed paths,
1215 ``added`` and ``removed`` are iterables of added and removed paths,
1225 respectively.
1216 respectively.
1226
1217
1227 ``readtree`` is a function that can be used to read the child tree(s)
1218 ``readtree`` is a function that can be used to read the child tree(s)
1228 when recursively writing the full tree structure when using
1219 when recursively writing the full tree structure when using
1229 treemanifets.
1220 treemanifets.
1230
1221
1231 ``match`` is a matcher that can be used to hint to storage that not all
1222 ``match`` is a matcher that can be used to hint to storage that not all
1232 paths must be inspected; this is an optimization and can be safely
1223 paths must be inspected; this is an optimization and can be safely
1233 ignored. Note that the storage must still be able to reproduce a full
1224 ignored. Note that the storage must still be able to reproduce a full
1234 manifest including files that did not match.
1225 manifest including files that did not match.
1235 """
1226 """
1236
1227
1237 def storageinfo(exclusivefiles=False, sharedfiles=False,
1228 def storageinfo(exclusivefiles=False, sharedfiles=False,
1238 revisionscount=False, trackedsize=False,
1229 revisionscount=False, trackedsize=False,
1239 storedsize=False):
1230 storedsize=False):
1240 """Obtain information about storage for this manifest's data.
1231 """Obtain information about storage for this manifest's data.
1241
1232
1242 See ``ifilestorage.storageinfo()`` for a description of this method.
1233 See ``ifilestorage.storageinfo()`` for a description of this method.
1243 This one behaves the same way, except for manifest data.
1234 This one behaves the same way, except for manifest data.
1244 """
1235 """
1245
1236
1246 class imanifestlog(interfaceutil.Interface):
1237 class imanifestlog(interfaceutil.Interface):
1247 """Interface representing a collection of manifest snapshots.
1238 """Interface representing a collection of manifest snapshots.
1248
1239
1249 Represents the root manifest in a repository.
1240 Represents the root manifest in a repository.
1250
1241
1251 Also serves as a means to access nested tree manifests and to cache
1242 Also serves as a means to access nested tree manifests and to cache
1252 tree manifests.
1243 tree manifests.
1253 """
1244 """
1254
1245
1255 def __getitem__(node):
1246 def __getitem__(node):
1256 """Obtain a manifest instance for a given binary node.
1247 """Obtain a manifest instance for a given binary node.
1257
1248
1258 Equivalent to calling ``self.get('', node)``.
1249 Equivalent to calling ``self.get('', node)``.
1259
1250
1260 The returned object conforms to the ``imanifestrevisionstored``
1251 The returned object conforms to the ``imanifestrevisionstored``
1261 interface.
1252 interface.
1262 """
1253 """
1263
1254
1264 def get(tree, node, verify=True):
1255 def get(tree, node, verify=True):
1265 """Retrieve the manifest instance for a given directory and binary node.
1256 """Retrieve the manifest instance for a given directory and binary node.
1266
1257
1267 ``node`` always refers to the node of the root manifest (which will be
1258 ``node`` always refers to the node of the root manifest (which will be
1268 the only manifest if flat manifests are being used).
1259 the only manifest if flat manifests are being used).
1269
1260
1270 If ``tree`` is the empty string, the root manifest is returned.
1261 If ``tree`` is the empty string, the root manifest is returned.
1271 Otherwise the manifest for the specified directory will be returned
1262 Otherwise the manifest for the specified directory will be returned
1272 (requires tree manifests).
1263 (requires tree manifests).
1273
1264
1274 If ``verify`` is True, ``LookupError`` is raised if the node is not
1265 If ``verify`` is True, ``LookupError`` is raised if the node is not
1275 known.
1266 known.
1276
1267
1277 The returned object conforms to the ``imanifestrevisionstored``
1268 The returned object conforms to the ``imanifestrevisionstored``
1278 interface.
1269 interface.
1279 """
1270 """
1280
1271
1281 def getstorage(tree):
1272 def getstorage(tree):
1282 """Retrieve an interface to storage for a particular tree.
1273 """Retrieve an interface to storage for a particular tree.
1283
1274
1284 If ``tree`` is the empty bytestring, storage for the root manifest will
1275 If ``tree`` is the empty bytestring, storage for the root manifest will
1285 be returned. Otherwise storage for a tree manifest is returned.
1276 be returned. Otherwise storage for a tree manifest is returned.
1286
1277
1287 TODO formalize interface for returned object.
1278 TODO formalize interface for returned object.
1288 """
1279 """
1289
1280
1290 def clearcaches():
1281 def clearcaches():
1291 """Clear caches associated with this collection."""
1282 """Clear caches associated with this collection."""
1292
1283
1293 def rev(node):
1284 def rev(node):
1294 """Obtain the revision number for a binary node.
1285 """Obtain the revision number for a binary node.
1295
1286
1296 Raises ``error.LookupError`` if the node is not known.
1287 Raises ``error.LookupError`` if the node is not known.
1297 """
1288 """
1298
1289
1299 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1290 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1300 """Local repository sub-interface providing access to tracked file storage.
1291 """Local repository sub-interface providing access to tracked file storage.
1301
1292
1302 This interface defines how a repository accesses storage for a single
1293 This interface defines how a repository accesses storage for a single
1303 tracked file path.
1294 tracked file path.
1304 """
1295 """
1305
1296
1306 def file(f):
1297 def file(f):
1307 """Obtain a filelog for a tracked path.
1298 """Obtain a filelog for a tracked path.
1308
1299
1309 The returned type conforms to the ``ifilestorage`` interface.
1300 The returned type conforms to the ``ifilestorage`` interface.
1310 """
1301 """
1311
1302
1312 class ilocalrepositorymain(interfaceutil.Interface):
1303 class ilocalrepositorymain(interfaceutil.Interface):
1313 """Main interface for local repositories.
1304 """Main interface for local repositories.
1314
1305
1315 This currently captures the reality of things - not how things should be.
1306 This currently captures the reality of things - not how things should be.
1316 """
1307 """
1317
1308
1318 supportedformats = interfaceutil.Attribute(
1309 supportedformats = interfaceutil.Attribute(
1319 """Set of requirements that apply to stream clone.
1310 """Set of requirements that apply to stream clone.
1320
1311
1321 This is actually a class attribute and is shared among all instances.
1312 This is actually a class attribute and is shared among all instances.
1322 """)
1313 """)
1323
1314
1324 supported = interfaceutil.Attribute(
1315 supported = interfaceutil.Attribute(
1325 """Set of requirements that this repo is capable of opening.""")
1316 """Set of requirements that this repo is capable of opening.""")
1326
1317
1327 requirements = interfaceutil.Attribute(
1318 requirements = interfaceutil.Attribute(
1328 """Set of requirements this repo uses.""")
1319 """Set of requirements this repo uses.""")
1329
1320
1330 features = interfaceutil.Attribute(
1321 features = interfaceutil.Attribute(
1331 """Set of "features" this repository supports.
1322 """Set of "features" this repository supports.
1332
1323
1333 A "feature" is a loosely-defined term. It can refer to a feature
1324 A "feature" is a loosely-defined term. It can refer to a feature
1334 in the classical sense or can describe an implementation detail
1325 in the classical sense or can describe an implementation detail
1335 of the repository. For example, a ``readonly`` feature may denote
1326 of the repository. For example, a ``readonly`` feature may denote
1336 the repository as read-only. Or a ``revlogfilestore`` feature may
1327 the repository as read-only. Or a ``revlogfilestore`` feature may
1337 denote that the repository is using revlogs for file storage.
1328 denote that the repository is using revlogs for file storage.
1338
1329
1339 The intent of features is to provide a machine-queryable mechanism
1330 The intent of features is to provide a machine-queryable mechanism
1340 for repo consumers to test for various repository characteristics.
1331 for repo consumers to test for various repository characteristics.
1341
1332
1342 Features are similar to ``requirements``. The main difference is that
1333 Features are similar to ``requirements``. The main difference is that
1343 requirements are stored on-disk and represent requirements to open the
1334 requirements are stored on-disk and represent requirements to open the
1344 repository. Features are more run-time capabilities of the repository
1335 repository. Features are more run-time capabilities of the repository
1345 and more granular capabilities (which may be derived from requirements).
1336 and more granular capabilities (which may be derived from requirements).
1346 """)
1337 """)
1347
1338
1348 filtername = interfaceutil.Attribute(
1339 filtername = interfaceutil.Attribute(
1349 """Name of the repoview that is active on this repo.""")
1340 """Name of the repoview that is active on this repo.""")
1350
1341
1351 wvfs = interfaceutil.Attribute(
1342 wvfs = interfaceutil.Attribute(
1352 """VFS used to access the working directory.""")
1343 """VFS used to access the working directory.""")
1353
1344
1354 vfs = interfaceutil.Attribute(
1345 vfs = interfaceutil.Attribute(
1355 """VFS rooted at the .hg directory.
1346 """VFS rooted at the .hg directory.
1356
1347
1357 Used to access repository data not in the store.
1348 Used to access repository data not in the store.
1358 """)
1349 """)
1359
1350
1360 svfs = interfaceutil.Attribute(
1351 svfs = interfaceutil.Attribute(
1361 """VFS rooted at the store.
1352 """VFS rooted at the store.
1362
1353
1363 Used to access repository data in the store. Typically .hg/store.
1354 Used to access repository data in the store. Typically .hg/store.
1364 But can point elsewhere if the store is shared.
1355 But can point elsewhere if the store is shared.
1365 """)
1356 """)
1366
1357
1367 root = interfaceutil.Attribute(
1358 root = interfaceutil.Attribute(
1368 """Path to the root of the working directory.""")
1359 """Path to the root of the working directory.""")
1369
1360
1370 path = interfaceutil.Attribute(
1361 path = interfaceutil.Attribute(
1371 """Path to the .hg directory.""")
1362 """Path to the .hg directory.""")
1372
1363
1373 origroot = interfaceutil.Attribute(
1364 origroot = interfaceutil.Attribute(
1374 """The filesystem path that was used to construct the repo.""")
1365 """The filesystem path that was used to construct the repo.""")
1375
1366
1376 auditor = interfaceutil.Attribute(
1367 auditor = interfaceutil.Attribute(
1377 """A pathauditor for the working directory.
1368 """A pathauditor for the working directory.
1378
1369
1379 This checks if a path refers to a nested repository.
1370 This checks if a path refers to a nested repository.
1380
1371
1381 Operates on the filesystem.
1372 Operates on the filesystem.
1382 """)
1373 """)
1383
1374
1384 nofsauditor = interfaceutil.Attribute(
1375 nofsauditor = interfaceutil.Attribute(
1385 """A pathauditor for the working directory.
1376 """A pathauditor for the working directory.
1386
1377
1387 This is like ``auditor`` except it doesn't do filesystem checks.
1378 This is like ``auditor`` except it doesn't do filesystem checks.
1388 """)
1379 """)
1389
1380
1390 baseui = interfaceutil.Attribute(
1381 baseui = interfaceutil.Attribute(
1391 """Original ui instance passed into constructor.""")
1382 """Original ui instance passed into constructor.""")
1392
1383
1393 ui = interfaceutil.Attribute(
1384 ui = interfaceutil.Attribute(
1394 """Main ui instance for this instance.""")
1385 """Main ui instance for this instance.""")
1395
1386
1396 sharedpath = interfaceutil.Attribute(
1387 sharedpath = interfaceutil.Attribute(
1397 """Path to the .hg directory of the repo this repo was shared from.""")
1388 """Path to the .hg directory of the repo this repo was shared from.""")
1398
1389
1399 store = interfaceutil.Attribute(
1390 store = interfaceutil.Attribute(
1400 """A store instance.""")
1391 """A store instance.""")
1401
1392
1402 spath = interfaceutil.Attribute(
1393 spath = interfaceutil.Attribute(
1403 """Path to the store.""")
1394 """Path to the store.""")
1404
1395
1405 sjoin = interfaceutil.Attribute(
1396 sjoin = interfaceutil.Attribute(
1406 """Alias to self.store.join.""")
1397 """Alias to self.store.join.""")
1407
1398
1408 cachevfs = interfaceutil.Attribute(
1399 cachevfs = interfaceutil.Attribute(
1409 """A VFS used to access the cache directory.
1400 """A VFS used to access the cache directory.
1410
1401
1411 Typically .hg/cache.
1402 Typically .hg/cache.
1412 """)
1403 """)
1413
1404
1414 filteredrevcache = interfaceutil.Attribute(
1405 filteredrevcache = interfaceutil.Attribute(
1415 """Holds sets of revisions to be filtered.""")
1406 """Holds sets of revisions to be filtered.""")
1416
1407
1417 names = interfaceutil.Attribute(
1408 names = interfaceutil.Attribute(
1418 """A ``namespaces`` instance.""")
1409 """A ``namespaces`` instance.""")
1419
1410
1420 def close():
1411 def close():
1421 """Close the handle on this repository."""
1412 """Close the handle on this repository."""
1422
1413
1423 def peer():
1414 def peer():
1424 """Obtain an object conforming to the ``peer`` interface."""
1415 """Obtain an object conforming to the ``peer`` interface."""
1425
1416
1426 def unfiltered():
1417 def unfiltered():
1427 """Obtain an unfiltered/raw view of this repo."""
1418 """Obtain an unfiltered/raw view of this repo."""
1428
1419
1429 def filtered(name, visibilityexceptions=None):
1420 def filtered(name, visibilityexceptions=None):
1430 """Obtain a named view of this repository."""
1421 """Obtain a named view of this repository."""
1431
1422
1432 obsstore = interfaceutil.Attribute(
1423 obsstore = interfaceutil.Attribute(
1433 """A store of obsolescence data.""")
1424 """A store of obsolescence data.""")
1434
1425
1435 changelog = interfaceutil.Attribute(
1426 changelog = interfaceutil.Attribute(
1436 """A handle on the changelog revlog.""")
1427 """A handle on the changelog revlog.""")
1437
1428
1438 manifestlog = interfaceutil.Attribute(
1429 manifestlog = interfaceutil.Attribute(
1439 """An instance conforming to the ``imanifestlog`` interface.
1430 """An instance conforming to the ``imanifestlog`` interface.
1440
1431
1441 Provides access to manifests for the repository.
1432 Provides access to manifests for the repository.
1442 """)
1433 """)
1443
1434
1444 dirstate = interfaceutil.Attribute(
1435 dirstate = interfaceutil.Attribute(
1445 """Working directory state.""")
1436 """Working directory state.""")
1446
1437
1447 narrowpats = interfaceutil.Attribute(
1438 narrowpats = interfaceutil.Attribute(
1448 """Matcher patterns for this repository's narrowspec.""")
1439 """Matcher patterns for this repository's narrowspec.""")
1449
1440
1450 def narrowmatch():
1441 def narrowmatch():
1451 """Obtain a matcher for the narrowspec."""
1442 """Obtain a matcher for the narrowspec."""
1452
1443
1453 def setnarrowpats(newincludes, newexcludes):
1444 def setnarrowpats(newincludes, newexcludes):
1454 """Define the narrowspec for this repository."""
1445 """Define the narrowspec for this repository."""
1455
1446
1456 def __getitem__(changeid):
1447 def __getitem__(changeid):
1457 """Try to resolve a changectx."""
1448 """Try to resolve a changectx."""
1458
1449
1459 def __contains__(changeid):
1450 def __contains__(changeid):
1460 """Whether a changeset exists."""
1451 """Whether a changeset exists."""
1461
1452
1462 def __nonzero__():
1453 def __nonzero__():
1463 """Always returns True."""
1454 """Always returns True."""
1464 return True
1455 return True
1465
1456
1466 __bool__ = __nonzero__
1457 __bool__ = __nonzero__
1467
1458
1468 def __len__():
1459 def __len__():
1469 """Returns the number of changesets in the repo."""
1460 """Returns the number of changesets in the repo."""
1470
1461
1471 def __iter__():
1462 def __iter__():
1472 """Iterate over revisions in the changelog."""
1463 """Iterate over revisions in the changelog."""
1473
1464
1474 def revs(expr, *args):
1465 def revs(expr, *args):
1475 """Evaluate a revset.
1466 """Evaluate a revset.
1476
1467
1477 Emits revisions.
1468 Emits revisions.
1478 """
1469 """
1479
1470
1480 def set(expr, *args):
1471 def set(expr, *args):
1481 """Evaluate a revset.
1472 """Evaluate a revset.
1482
1473
1483 Emits changectx instances.
1474 Emits changectx instances.
1484 """
1475 """
1485
1476
1486 def anyrevs(specs, user=False, localalias=None):
1477 def anyrevs(specs, user=False, localalias=None):
1487 """Find revisions matching one of the given revsets."""
1478 """Find revisions matching one of the given revsets."""
1488
1479
1489 def url():
1480 def url():
1490 """Returns a string representing the location of this repo."""
1481 """Returns a string representing the location of this repo."""
1491
1482
1492 def hook(name, throw=False, **args):
1483 def hook(name, throw=False, **args):
1493 """Call a hook."""
1484 """Call a hook."""
1494
1485
1495 def tags():
1486 def tags():
1496 """Return a mapping of tag to node."""
1487 """Return a mapping of tag to node."""
1497
1488
1498 def tagtype(tagname):
1489 def tagtype(tagname):
1499 """Return the type of a given tag."""
1490 """Return the type of a given tag."""
1500
1491
1501 def tagslist():
1492 def tagslist():
1502 """Return a list of tags ordered by revision."""
1493 """Return a list of tags ordered by revision."""
1503
1494
1504 def nodetags(node):
1495 def nodetags(node):
1505 """Return the tags associated with a node."""
1496 """Return the tags associated with a node."""
1506
1497
1507 def nodebookmarks(node):
1498 def nodebookmarks(node):
1508 """Return the list of bookmarks pointing to the specified node."""
1499 """Return the list of bookmarks pointing to the specified node."""
1509
1500
1510 def branchmap():
1501 def branchmap():
1511 """Return a mapping of branch to heads in that branch."""
1502 """Return a mapping of branch to heads in that branch."""
1512
1503
1513 def revbranchcache():
1504 def revbranchcache():
1514 pass
1505 pass
1515
1506
1516 def branchtip(branchtip, ignoremissing=False):
1507 def branchtip(branchtip, ignoremissing=False):
1517 """Return the tip node for a given branch."""
1508 """Return the tip node for a given branch."""
1518
1509
1519 def lookup(key):
1510 def lookup(key):
1520 """Resolve the node for a revision."""
1511 """Resolve the node for a revision."""
1521
1512
1522 def lookupbranch(key):
1513 def lookupbranch(key):
1523 """Look up the branch name of the given revision or branch name."""
1514 """Look up the branch name of the given revision or branch name."""
1524
1515
1525 def known(nodes):
1516 def known(nodes):
1526 """Determine whether a series of nodes is known.
1517 """Determine whether a series of nodes is known.
1527
1518
1528 Returns a list of bools.
1519 Returns a list of bools.
1529 """
1520 """
1530
1521
1531 def local():
1522 def local():
1532 """Whether the repository is local."""
1523 """Whether the repository is local."""
1533 return True
1524 return True
1534
1525
1535 def publishing():
1526 def publishing():
1536 """Whether the repository is a publishing repository."""
1527 """Whether the repository is a publishing repository."""
1537
1528
1538 def cancopy():
1529 def cancopy():
1539 pass
1530 pass
1540
1531
1541 def shared():
1532 def shared():
1542 """The type of shared repository or None."""
1533 """The type of shared repository or None."""
1543
1534
1544 def wjoin(f, *insidef):
1535 def wjoin(f, *insidef):
1545 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1536 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1546
1537
1547 def setparents(p1, p2):
1538 def setparents(p1, p2):
1548 """Set the parent nodes of the working directory."""
1539 """Set the parent nodes of the working directory."""
1549
1540
1550 def filectx(path, changeid=None, fileid=None):
1541 def filectx(path, changeid=None, fileid=None):
1551 """Obtain a filectx for the given file revision."""
1542 """Obtain a filectx for the given file revision."""
1552
1543
1553 def getcwd():
1544 def getcwd():
1554 """Obtain the current working directory from the dirstate."""
1545 """Obtain the current working directory from the dirstate."""
1555
1546
1556 def pathto(f, cwd=None):
1547 def pathto(f, cwd=None):
1557 """Obtain the relative path to a file."""
1548 """Obtain the relative path to a file."""
1558
1549
1559 def adddatafilter(name, fltr):
1550 def adddatafilter(name, fltr):
1560 pass
1551 pass
1561
1552
1562 def wread(filename):
1553 def wread(filename):
1563 """Read a file from wvfs, using data filters."""
1554 """Read a file from wvfs, using data filters."""
1564
1555
1565 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1556 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1566 """Write data to a file in the wvfs, using data filters."""
1557 """Write data to a file in the wvfs, using data filters."""
1567
1558
1568 def wwritedata(filename, data):
1559 def wwritedata(filename, data):
1569 """Resolve data for writing to the wvfs, using data filters."""
1560 """Resolve data for writing to the wvfs, using data filters."""
1570
1561
1571 def currenttransaction():
1562 def currenttransaction():
1572 """Obtain the current transaction instance or None."""
1563 """Obtain the current transaction instance or None."""
1573
1564
1574 def transaction(desc, report=None):
1565 def transaction(desc, report=None):
1575 """Open a new transaction to write to the repository."""
1566 """Open a new transaction to write to the repository."""
1576
1567
1577 def undofiles():
1568 def undofiles():
1578 """Returns a list of (vfs, path) for files to undo transactions."""
1569 """Returns a list of (vfs, path) for files to undo transactions."""
1579
1570
1580 def recover():
1571 def recover():
1581 """Roll back an interrupted transaction."""
1572 """Roll back an interrupted transaction."""
1582
1573
1583 def rollback(dryrun=False, force=False):
1574 def rollback(dryrun=False, force=False):
1584 """Undo the last transaction.
1575 """Undo the last transaction.
1585
1576
1586 DANGEROUS.
1577 DANGEROUS.
1587 """
1578 """
1588
1579
1589 def updatecaches(tr=None, full=False):
1580 def updatecaches(tr=None, full=False):
1590 """Warm repo caches."""
1581 """Warm repo caches."""
1591
1582
1592 def invalidatecaches():
1583 def invalidatecaches():
1593 """Invalidate cached data due to the repository mutating."""
1584 """Invalidate cached data due to the repository mutating."""
1594
1585
1595 def invalidatevolatilesets():
1586 def invalidatevolatilesets():
1596 pass
1587 pass
1597
1588
1598 def invalidatedirstate():
1589 def invalidatedirstate():
1599 """Invalidate the dirstate."""
1590 """Invalidate the dirstate."""
1600
1591
1601 def invalidate(clearfilecache=False):
1592 def invalidate(clearfilecache=False):
1602 pass
1593 pass
1603
1594
1604 def invalidateall():
1595 def invalidateall():
1605 pass
1596 pass
1606
1597
1607 def lock(wait=True):
1598 def lock(wait=True):
1608 """Lock the repository store and return a lock instance."""
1599 """Lock the repository store and return a lock instance."""
1609
1600
1610 def wlock(wait=True):
1601 def wlock(wait=True):
1611 """Lock the non-store parts of the repository."""
1602 """Lock the non-store parts of the repository."""
1612
1603
1613 def currentwlock():
1604 def currentwlock():
1614 """Return the wlock if it's held or None."""
1605 """Return the wlock if it's held or None."""
1615
1606
1616 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1607 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1617 pass
1608 pass
1618
1609
1619 def commit(text='', user=None, date=None, match=None, force=False,
1610 def commit(text='', user=None, date=None, match=None, force=False,
1620 editor=False, extra=None):
1611 editor=False, extra=None):
1621 """Add a new revision to the repository."""
1612 """Add a new revision to the repository."""
1622
1613
1623 def commitctx(ctx, error=False):
1614 def commitctx(ctx, error=False):
1624 """Commit a commitctx instance to the repository."""
1615 """Commit a commitctx instance to the repository."""
1625
1616
1626 def destroying():
1617 def destroying():
1627 """Inform the repository that nodes are about to be destroyed."""
1618 """Inform the repository that nodes are about to be destroyed."""
1628
1619
1629 def destroyed():
1620 def destroyed():
1630 """Inform the repository that nodes have been destroyed."""
1621 """Inform the repository that nodes have been destroyed."""
1631
1622
1632 def status(node1='.', node2=None, match=None, ignored=False,
1623 def status(node1='.', node2=None, match=None, ignored=False,
1633 clean=False, unknown=False, listsubrepos=False):
1624 clean=False, unknown=False, listsubrepos=False):
1634 """Convenience method to call repo[x].status()."""
1625 """Convenience method to call repo[x].status()."""
1635
1626
1636 def addpostdsstatus(ps):
1627 def addpostdsstatus(ps):
1637 pass
1628 pass
1638
1629
1639 def postdsstatus():
1630 def postdsstatus():
1640 pass
1631 pass
1641
1632
1642 def clearpostdsstatus():
1633 def clearpostdsstatus():
1643 pass
1634 pass
1644
1635
1645 def heads(start=None):
1636 def heads(start=None):
1646 """Obtain list of nodes that are DAG heads."""
1637 """Obtain list of nodes that are DAG heads."""
1647
1638
1648 def branchheads(branch=None, start=None, closed=False):
1639 def branchheads(branch=None, start=None, closed=False):
1649 pass
1640 pass
1650
1641
1651 def branches(nodes):
1642 def branches(nodes):
1652 pass
1643 pass
1653
1644
1654 def between(pairs):
1645 def between(pairs):
1655 pass
1646 pass
1656
1647
1657 def checkpush(pushop):
1648 def checkpush(pushop):
1658 pass
1649 pass
1659
1650
1660 prepushoutgoinghooks = interfaceutil.Attribute(
1651 prepushoutgoinghooks = interfaceutil.Attribute(
1661 """util.hooks instance.""")
1652 """util.hooks instance.""")
1662
1653
1663 def pushkey(namespace, key, old, new):
1654 def pushkey(namespace, key, old, new):
1664 pass
1655 pass
1665
1656
1666 def listkeys(namespace):
1657 def listkeys(namespace):
1667 pass
1658 pass
1668
1659
1669 def debugwireargs(one, two, three=None, four=None, five=None):
1660 def debugwireargs(one, two, three=None, four=None, five=None):
1670 pass
1661 pass
1671
1662
1672 def savecommitmessage(text):
1663 def savecommitmessage(text):
1673 pass
1664 pass
1674
1665
1675 class completelocalrepository(ilocalrepositorymain,
1666 class completelocalrepository(ilocalrepositorymain,
1676 ilocalrepositoryfilestorage):
1667 ilocalrepositoryfilestorage):
1677 """Complete interface for a local repository."""
1668 """Complete interface for a local repository."""
@@ -1,1060 +1,1033
1 # storage.py - Testing of storage primitives.
1 # storage.py - Testing of storage primitives.
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import unittest
10 import unittest
11
11
12 from ..node import (
12 from ..node import (
13 hex,
13 hex,
14 nullid,
14 nullid,
15 nullrev,
15 nullrev,
16 )
16 )
17 from .. import (
17 from .. import (
18 error,
18 error,
19 mdiff,
19 mdiff,
20 revlog,
20 revlog,
21 )
21 )
22 from ..utils import (
22 from ..utils import (
23 storageutil,
23 storageutil,
24 )
24 )
25
25
26 class basetestcase(unittest.TestCase):
26 class basetestcase(unittest.TestCase):
27 if not getattr(unittest.TestCase, r'assertRaisesRegex', False):
27 if not getattr(unittest.TestCase, r'assertRaisesRegex', False):
28 assertRaisesRegex = (# camelcase-required
28 assertRaisesRegex = (# camelcase-required
29 unittest.TestCase.assertRaisesRegexp)
29 unittest.TestCase.assertRaisesRegexp)
30
30
31 class ifileindextests(basetestcase):
31 class ifileindextests(basetestcase):
32 """Generic tests for the ifileindex interface.
32 """Generic tests for the ifileindex interface.
33
33
34 All file storage backends for index data should conform to the tests in this
34 All file storage backends for index data should conform to the tests in this
35 class.
35 class.
36
36
37 Use ``makeifileindextests()`` to create an instance of this type.
37 Use ``makeifileindextests()`` to create an instance of this type.
38 """
38 """
39 def testempty(self):
39 def testempty(self):
40 f = self._makefilefn()
40 f = self._makefilefn()
41 self.assertEqual(len(f), 0, 'new file store has 0 length by default')
41 self.assertEqual(len(f), 0, 'new file store has 0 length by default')
42 self.assertEqual(list(f), [], 'iter yields nothing by default')
42 self.assertEqual(list(f), [], 'iter yields nothing by default')
43
43
44 gen = iter(f)
44 gen = iter(f)
45 with self.assertRaises(StopIteration):
45 with self.assertRaises(StopIteration):
46 next(gen)
46 next(gen)
47
47
48 # revs() should evaluate to an empty list.
48 # revs() should evaluate to an empty list.
49 self.assertEqual(list(f.revs()), [])
49 self.assertEqual(list(f.revs()), [])
50
50
51 revs = iter(f.revs())
51 revs = iter(f.revs())
52 with self.assertRaises(StopIteration):
52 with self.assertRaises(StopIteration):
53 next(revs)
53 next(revs)
54
54
55 self.assertEqual(list(f.revs(start=20)), [])
55 self.assertEqual(list(f.revs(start=20)), [])
56
56
57 # parents() and parentrevs() work with nullid/nullrev.
57 # parents() and parentrevs() work with nullid/nullrev.
58 self.assertEqual(f.parents(nullid), (nullid, nullid))
58 self.assertEqual(f.parents(nullid), (nullid, nullid))
59 self.assertEqual(f.parentrevs(nullrev), (nullrev, nullrev))
59 self.assertEqual(f.parentrevs(nullrev), (nullrev, nullrev))
60
60
61 with self.assertRaises(error.LookupError):
61 with self.assertRaises(error.LookupError):
62 f.parents(b'\x01' * 20)
62 f.parents(b'\x01' * 20)
63
63
64 for i in range(-5, 5):
64 for i in range(-5, 5):
65 if i == nullrev:
65 if i == nullrev:
66 continue
66 continue
67
67
68 with self.assertRaises(IndexError):
68 with self.assertRaises(IndexError):
69 f.parentrevs(i)
69 f.parentrevs(i)
70
70
71 # nullid/nullrev lookup always works.
71 # nullid/nullrev lookup always works.
72 self.assertEqual(f.rev(nullid), nullrev)
72 self.assertEqual(f.rev(nullid), nullrev)
73 self.assertEqual(f.node(nullrev), nullid)
73 self.assertEqual(f.node(nullrev), nullid)
74
74
75 with self.assertRaises(error.LookupError):
75 with self.assertRaises(error.LookupError):
76 f.rev(b'\x01' * 20)
76 f.rev(b'\x01' * 20)
77
77
78 for i in range(-5, 5):
78 for i in range(-5, 5):
79 if i == nullrev:
79 if i == nullrev:
80 continue
80 continue
81
81
82 with self.assertRaises(IndexError):
82 with self.assertRaises(IndexError):
83 f.node(i)
83 f.node(i)
84
84
85 self.assertEqual(f.lookup(nullid), nullid)
85 self.assertEqual(f.lookup(nullid), nullid)
86 self.assertEqual(f.lookup(nullrev), nullid)
86 self.assertEqual(f.lookup(nullrev), nullid)
87 self.assertEqual(f.lookup(hex(nullid)), nullid)
87 self.assertEqual(f.lookup(hex(nullid)), nullid)
88
88
89 # String converted to integer doesn't work for nullrev.
89 # String converted to integer doesn't work for nullrev.
90 with self.assertRaises(error.LookupError):
90 with self.assertRaises(error.LookupError):
91 f.lookup(b'%d' % nullrev)
91 f.lookup(b'%d' % nullrev)
92
92
93 self.assertEqual(f.linkrev(nullrev), nullrev)
93 self.assertEqual(f.linkrev(nullrev), nullrev)
94
94
95 for i in range(-5, 5):
95 for i in range(-5, 5):
96 if i == nullrev:
96 if i == nullrev:
97 continue
97 continue
98
98
99 with self.assertRaises(IndexError):
99 with self.assertRaises(IndexError):
100 f.linkrev(i)
100 f.linkrev(i)
101
101
102 self.assertFalse(f.iscensored(nullrev))
102 self.assertFalse(f.iscensored(nullrev))
103
103
104 for i in range(-5, 5):
104 for i in range(-5, 5):
105 if i == nullrev:
105 if i == nullrev:
106 continue
106 continue
107
107
108 with self.assertRaises(IndexError):
108 with self.assertRaises(IndexError):
109 f.iscensored(i)
109 f.iscensored(i)
110
110
111 self.assertEqual(list(f.commonancestorsheads(nullid, nullid)), [])
111 self.assertEqual(list(f.commonancestorsheads(nullid, nullid)), [])
112
112
113 with self.assertRaises(ValueError):
113 with self.assertRaises(ValueError):
114 self.assertEqual(list(f.descendants([])), [])
114 self.assertEqual(list(f.descendants([])), [])
115
115
116 self.assertEqual(list(f.descendants([nullrev])), [])
116 self.assertEqual(list(f.descendants([nullrev])), [])
117
117
118 self.assertEqual(f.heads(), [nullid])
118 self.assertEqual(f.heads(), [nullid])
119 self.assertEqual(f.heads(nullid), [nullid])
119 self.assertEqual(f.heads(nullid), [nullid])
120 self.assertEqual(f.heads(None, [nullid]), [nullid])
120 self.assertEqual(f.heads(None, [nullid]), [nullid])
121 self.assertEqual(f.heads(nullid, [nullid]), [nullid])
121 self.assertEqual(f.heads(nullid, [nullid]), [nullid])
122
122
123 self.assertEqual(f.children(nullid), [])
123 self.assertEqual(f.children(nullid), [])
124
124
125 with self.assertRaises(error.LookupError):
125 with self.assertRaises(error.LookupError):
126 f.children(b'\x01' * 20)
126 f.children(b'\x01' * 20)
127
127
128 def testsinglerevision(self):
128 def testsinglerevision(self):
129 f = self._makefilefn()
129 f = self._makefilefn()
130 with self._maketransactionfn() as tr:
130 with self._maketransactionfn() as tr:
131 node = f.add(b'initial', None, tr, 0, nullid, nullid)
131 node = f.add(b'initial', None, tr, 0, nullid, nullid)
132
132
133 self.assertEqual(len(f), 1)
133 self.assertEqual(len(f), 1)
134 self.assertEqual(list(f), [0])
134 self.assertEqual(list(f), [0])
135
135
136 gen = iter(f)
136 gen = iter(f)
137 self.assertEqual(next(gen), 0)
137 self.assertEqual(next(gen), 0)
138
138
139 with self.assertRaises(StopIteration):
139 with self.assertRaises(StopIteration):
140 next(gen)
140 next(gen)
141
141
142 self.assertEqual(list(f.revs()), [0])
142 self.assertEqual(list(f.revs()), [0])
143 self.assertEqual(list(f.revs(start=1)), [])
143 self.assertEqual(list(f.revs(start=1)), [])
144 self.assertEqual(list(f.revs(start=0)), [0])
144 self.assertEqual(list(f.revs(start=0)), [0])
145 self.assertEqual(list(f.revs(stop=0)), [0])
145 self.assertEqual(list(f.revs(stop=0)), [0])
146 self.assertEqual(list(f.revs(stop=1)), [0])
146 self.assertEqual(list(f.revs(stop=1)), [0])
147 self.assertEqual(list(f.revs(1, 1)), [])
147 self.assertEqual(list(f.revs(1, 1)), [])
148 # TODO buggy
148 # TODO buggy
149 self.assertEqual(list(f.revs(1, 0)), [1, 0])
149 self.assertEqual(list(f.revs(1, 0)), [1, 0])
150 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
150 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
151
151
152 self.assertEqual(f.parents(node), (nullid, nullid))
152 self.assertEqual(f.parents(node), (nullid, nullid))
153 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
153 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
154
154
155 with self.assertRaises(error.LookupError):
155 with self.assertRaises(error.LookupError):
156 f.parents(b'\x01' * 20)
156 f.parents(b'\x01' * 20)
157
157
158 with self.assertRaises(IndexError):
158 with self.assertRaises(IndexError):
159 f.parentrevs(1)
159 f.parentrevs(1)
160
160
161 self.assertEqual(f.rev(node), 0)
161 self.assertEqual(f.rev(node), 0)
162
162
163 with self.assertRaises(error.LookupError):
163 with self.assertRaises(error.LookupError):
164 f.rev(b'\x01' * 20)
164 f.rev(b'\x01' * 20)
165
165
166 self.assertEqual(f.node(0), node)
166 self.assertEqual(f.node(0), node)
167
167
168 with self.assertRaises(IndexError):
168 with self.assertRaises(IndexError):
169 f.node(1)
169 f.node(1)
170
170
171 self.assertEqual(f.lookup(node), node)
171 self.assertEqual(f.lookup(node), node)
172 self.assertEqual(f.lookup(0), node)
172 self.assertEqual(f.lookup(0), node)
173 self.assertEqual(f.lookup(b'0'), node)
173 self.assertEqual(f.lookup(b'0'), node)
174 self.assertEqual(f.lookup(hex(node)), node)
174 self.assertEqual(f.lookup(hex(node)), node)
175
175
176 self.assertEqual(f.linkrev(0), 0)
176 self.assertEqual(f.linkrev(0), 0)
177
177
178 with self.assertRaises(IndexError):
178 with self.assertRaises(IndexError):
179 f.linkrev(1)
179 f.linkrev(1)
180
180
181 self.assertFalse(f.iscensored(0))
181 self.assertFalse(f.iscensored(0))
182
182
183 with self.assertRaises(IndexError):
183 with self.assertRaises(IndexError):
184 f.iscensored(1)
184 f.iscensored(1)
185
185
186 self.assertEqual(list(f.descendants([0])), [])
186 self.assertEqual(list(f.descendants([0])), [])
187
187
188 self.assertEqual(f.heads(), [node])
188 self.assertEqual(f.heads(), [node])
189 self.assertEqual(f.heads(node), [node])
189 self.assertEqual(f.heads(node), [node])
190 self.assertEqual(f.heads(stop=[node]), [node])
190 self.assertEqual(f.heads(stop=[node]), [node])
191
191
192 with self.assertRaises(error.LookupError):
192 with self.assertRaises(error.LookupError):
193 f.heads(stop=[b'\x01' * 20])
193 f.heads(stop=[b'\x01' * 20])
194
194
195 self.assertEqual(f.children(node), [])
195 self.assertEqual(f.children(node), [])
196
196
197 def testmultiplerevisions(self):
197 def testmultiplerevisions(self):
198 fulltext0 = b'x' * 1024
198 fulltext0 = b'x' * 1024
199 fulltext1 = fulltext0 + b'y'
199 fulltext1 = fulltext0 + b'y'
200 fulltext2 = b'y' + fulltext0 + b'z'
200 fulltext2 = b'y' + fulltext0 + b'z'
201
201
202 f = self._makefilefn()
202 f = self._makefilefn()
203 with self._maketransactionfn() as tr:
203 with self._maketransactionfn() as tr:
204 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
204 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
205 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
205 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
206 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
206 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
207
207
208 self.assertEqual(len(f), 3)
208 self.assertEqual(len(f), 3)
209 self.assertEqual(list(f), [0, 1, 2])
209 self.assertEqual(list(f), [0, 1, 2])
210
210
211 gen = iter(f)
211 gen = iter(f)
212 self.assertEqual(next(gen), 0)
212 self.assertEqual(next(gen), 0)
213 self.assertEqual(next(gen), 1)
213 self.assertEqual(next(gen), 1)
214 self.assertEqual(next(gen), 2)
214 self.assertEqual(next(gen), 2)
215
215
216 with self.assertRaises(StopIteration):
216 with self.assertRaises(StopIteration):
217 next(gen)
217 next(gen)
218
218
219 self.assertEqual(list(f.revs()), [0, 1, 2])
219 self.assertEqual(list(f.revs()), [0, 1, 2])
220 self.assertEqual(list(f.revs(0)), [0, 1, 2])
220 self.assertEqual(list(f.revs(0)), [0, 1, 2])
221 self.assertEqual(list(f.revs(1)), [1, 2])
221 self.assertEqual(list(f.revs(1)), [1, 2])
222 self.assertEqual(list(f.revs(2)), [2])
222 self.assertEqual(list(f.revs(2)), [2])
223 self.assertEqual(list(f.revs(3)), [])
223 self.assertEqual(list(f.revs(3)), [])
224 self.assertEqual(list(f.revs(stop=1)), [0, 1])
224 self.assertEqual(list(f.revs(stop=1)), [0, 1])
225 self.assertEqual(list(f.revs(stop=2)), [0, 1, 2])
225 self.assertEqual(list(f.revs(stop=2)), [0, 1, 2])
226 self.assertEqual(list(f.revs(stop=3)), [0, 1, 2])
226 self.assertEqual(list(f.revs(stop=3)), [0, 1, 2])
227 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
227 self.assertEqual(list(f.revs(2, 0)), [2, 1, 0])
228 self.assertEqual(list(f.revs(2, 1)), [2, 1])
228 self.assertEqual(list(f.revs(2, 1)), [2, 1])
229 # TODO this is wrong
229 # TODO this is wrong
230 self.assertEqual(list(f.revs(3, 2)), [3, 2])
230 self.assertEqual(list(f.revs(3, 2)), [3, 2])
231
231
232 self.assertEqual(f.parents(node0), (nullid, nullid))
232 self.assertEqual(f.parents(node0), (nullid, nullid))
233 self.assertEqual(f.parents(node1), (node0, nullid))
233 self.assertEqual(f.parents(node1), (node0, nullid))
234 self.assertEqual(f.parents(node2), (node1, nullid))
234 self.assertEqual(f.parents(node2), (node1, nullid))
235
235
236 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
236 self.assertEqual(f.parentrevs(0), (nullrev, nullrev))
237 self.assertEqual(f.parentrevs(1), (0, nullrev))
237 self.assertEqual(f.parentrevs(1), (0, nullrev))
238 self.assertEqual(f.parentrevs(2), (1, nullrev))
238 self.assertEqual(f.parentrevs(2), (1, nullrev))
239
239
240 self.assertEqual(f.rev(node0), 0)
240 self.assertEqual(f.rev(node0), 0)
241 self.assertEqual(f.rev(node1), 1)
241 self.assertEqual(f.rev(node1), 1)
242 self.assertEqual(f.rev(node2), 2)
242 self.assertEqual(f.rev(node2), 2)
243
243
244 with self.assertRaises(error.LookupError):
244 with self.assertRaises(error.LookupError):
245 f.rev(b'\x01' * 20)
245 f.rev(b'\x01' * 20)
246
246
247 self.assertEqual(f.node(0), node0)
247 self.assertEqual(f.node(0), node0)
248 self.assertEqual(f.node(1), node1)
248 self.assertEqual(f.node(1), node1)
249 self.assertEqual(f.node(2), node2)
249 self.assertEqual(f.node(2), node2)
250
250
251 with self.assertRaises(IndexError):
251 with self.assertRaises(IndexError):
252 f.node(3)
252 f.node(3)
253
253
254 self.assertEqual(f.lookup(node0), node0)
254 self.assertEqual(f.lookup(node0), node0)
255 self.assertEqual(f.lookup(0), node0)
255 self.assertEqual(f.lookup(0), node0)
256 self.assertEqual(f.lookup(b'0'), node0)
256 self.assertEqual(f.lookup(b'0'), node0)
257 self.assertEqual(f.lookup(hex(node0)), node0)
257 self.assertEqual(f.lookup(hex(node0)), node0)
258
258
259 self.assertEqual(f.lookup(node1), node1)
259 self.assertEqual(f.lookup(node1), node1)
260 self.assertEqual(f.lookup(1), node1)
260 self.assertEqual(f.lookup(1), node1)
261 self.assertEqual(f.lookup(b'1'), node1)
261 self.assertEqual(f.lookup(b'1'), node1)
262 self.assertEqual(f.lookup(hex(node1)), node1)
262 self.assertEqual(f.lookup(hex(node1)), node1)
263
263
264 self.assertEqual(f.linkrev(0), 0)
264 self.assertEqual(f.linkrev(0), 0)
265 self.assertEqual(f.linkrev(1), 1)
265 self.assertEqual(f.linkrev(1), 1)
266 self.assertEqual(f.linkrev(2), 3)
266 self.assertEqual(f.linkrev(2), 3)
267
267
268 with self.assertRaises(IndexError):
268 with self.assertRaises(IndexError):
269 f.linkrev(3)
269 f.linkrev(3)
270
270
271 self.assertFalse(f.iscensored(0))
271 self.assertFalse(f.iscensored(0))
272 self.assertFalse(f.iscensored(1))
272 self.assertFalse(f.iscensored(1))
273 self.assertFalse(f.iscensored(2))
273 self.assertFalse(f.iscensored(2))
274
274
275 with self.assertRaises(IndexError):
275 with self.assertRaises(IndexError):
276 f.iscensored(3)
276 f.iscensored(3)
277
277
278 self.assertEqual(f.commonancestorsheads(node1, nullid), [])
278 self.assertEqual(f.commonancestorsheads(node1, nullid), [])
279 self.assertEqual(f.commonancestorsheads(node1, node0), [node0])
279 self.assertEqual(f.commonancestorsheads(node1, node0), [node0])
280 self.assertEqual(f.commonancestorsheads(node1, node1), [node1])
280 self.assertEqual(f.commonancestorsheads(node1, node1), [node1])
281 self.assertEqual(f.commonancestorsheads(node0, node1), [node0])
281 self.assertEqual(f.commonancestorsheads(node0, node1), [node0])
282 self.assertEqual(f.commonancestorsheads(node1, node2), [node1])
282 self.assertEqual(f.commonancestorsheads(node1, node2), [node1])
283 self.assertEqual(f.commonancestorsheads(node2, node1), [node1])
283 self.assertEqual(f.commonancestorsheads(node2, node1), [node1])
284
284
285 self.assertEqual(list(f.descendants([0])), [1, 2])
285 self.assertEqual(list(f.descendants([0])), [1, 2])
286 self.assertEqual(list(f.descendants([1])), [2])
286 self.assertEqual(list(f.descendants([1])), [2])
287 self.assertEqual(list(f.descendants([0, 1])), [1, 2])
287 self.assertEqual(list(f.descendants([0, 1])), [1, 2])
288
288
289 self.assertEqual(f.heads(), [node2])
289 self.assertEqual(f.heads(), [node2])
290 self.assertEqual(f.heads(node0), [node2])
290 self.assertEqual(f.heads(node0), [node2])
291 self.assertEqual(f.heads(node1), [node2])
291 self.assertEqual(f.heads(node1), [node2])
292 self.assertEqual(f.heads(node2), [node2])
292 self.assertEqual(f.heads(node2), [node2])
293
293
294 # TODO this behavior seems wonky. Is it correct? If so, the
294 # TODO this behavior seems wonky. Is it correct? If so, the
295 # docstring for heads() should be updated to reflect desired
295 # docstring for heads() should be updated to reflect desired
296 # behavior.
296 # behavior.
297 self.assertEqual(f.heads(stop=[node1]), [node1, node2])
297 self.assertEqual(f.heads(stop=[node1]), [node1, node2])
298 self.assertEqual(f.heads(stop=[node0]), [node0, node2])
298 self.assertEqual(f.heads(stop=[node0]), [node0, node2])
299 self.assertEqual(f.heads(stop=[node1, node2]), [node1, node2])
299 self.assertEqual(f.heads(stop=[node1, node2]), [node1, node2])
300
300
301 with self.assertRaises(error.LookupError):
301 with self.assertRaises(error.LookupError):
302 f.heads(stop=[b'\x01' * 20])
302 f.heads(stop=[b'\x01' * 20])
303
303
304 self.assertEqual(f.children(node0), [node1])
304 self.assertEqual(f.children(node0), [node1])
305 self.assertEqual(f.children(node1), [node2])
305 self.assertEqual(f.children(node1), [node2])
306 self.assertEqual(f.children(node2), [])
306 self.assertEqual(f.children(node2), [])
307
307
308 def testmultipleheads(self):
308 def testmultipleheads(self):
309 f = self._makefilefn()
309 f = self._makefilefn()
310
310
311 with self._maketransactionfn() as tr:
311 with self._maketransactionfn() as tr:
312 node0 = f.add(b'0', None, tr, 0, nullid, nullid)
312 node0 = f.add(b'0', None, tr, 0, nullid, nullid)
313 node1 = f.add(b'1', None, tr, 1, node0, nullid)
313 node1 = f.add(b'1', None, tr, 1, node0, nullid)
314 node2 = f.add(b'2', None, tr, 2, node1, nullid)
314 node2 = f.add(b'2', None, tr, 2, node1, nullid)
315 node3 = f.add(b'3', None, tr, 3, node0, nullid)
315 node3 = f.add(b'3', None, tr, 3, node0, nullid)
316 node4 = f.add(b'4', None, tr, 4, node3, nullid)
316 node4 = f.add(b'4', None, tr, 4, node3, nullid)
317 node5 = f.add(b'5', None, tr, 5, node0, nullid)
317 node5 = f.add(b'5', None, tr, 5, node0, nullid)
318
318
319 self.assertEqual(len(f), 6)
319 self.assertEqual(len(f), 6)
320
320
321 self.assertEqual(list(f.descendants([0])), [1, 2, 3, 4, 5])
321 self.assertEqual(list(f.descendants([0])), [1, 2, 3, 4, 5])
322 self.assertEqual(list(f.descendants([1])), [2])
322 self.assertEqual(list(f.descendants([1])), [2])
323 self.assertEqual(list(f.descendants([2])), [])
323 self.assertEqual(list(f.descendants([2])), [])
324 self.assertEqual(list(f.descendants([3])), [4])
324 self.assertEqual(list(f.descendants([3])), [4])
325 self.assertEqual(list(f.descendants([0, 1])), [1, 2, 3, 4, 5])
325 self.assertEqual(list(f.descendants([0, 1])), [1, 2, 3, 4, 5])
326 self.assertEqual(list(f.descendants([1, 3])), [2, 4])
326 self.assertEqual(list(f.descendants([1, 3])), [2, 4])
327
327
328 self.assertEqual(f.heads(), [node2, node4, node5])
328 self.assertEqual(f.heads(), [node2, node4, node5])
329 self.assertEqual(f.heads(node0), [node2, node4, node5])
329 self.assertEqual(f.heads(node0), [node2, node4, node5])
330 self.assertEqual(f.heads(node1), [node2])
330 self.assertEqual(f.heads(node1), [node2])
331 self.assertEqual(f.heads(node2), [node2])
331 self.assertEqual(f.heads(node2), [node2])
332 self.assertEqual(f.heads(node3), [node4])
332 self.assertEqual(f.heads(node3), [node4])
333 self.assertEqual(f.heads(node4), [node4])
333 self.assertEqual(f.heads(node4), [node4])
334 self.assertEqual(f.heads(node5), [node5])
334 self.assertEqual(f.heads(node5), [node5])
335
335
336 # TODO this seems wrong.
336 # TODO this seems wrong.
337 self.assertEqual(f.heads(stop=[node0]), [node0, node2, node4, node5])
337 self.assertEqual(f.heads(stop=[node0]), [node0, node2, node4, node5])
338 self.assertEqual(f.heads(stop=[node1]), [node1, node2, node4, node5])
338 self.assertEqual(f.heads(stop=[node1]), [node1, node2, node4, node5])
339
339
340 self.assertEqual(f.children(node0), [node1, node3, node5])
340 self.assertEqual(f.children(node0), [node1, node3, node5])
341 self.assertEqual(f.children(node1), [node2])
341 self.assertEqual(f.children(node1), [node2])
342 self.assertEqual(f.children(node2), [])
342 self.assertEqual(f.children(node2), [])
343 self.assertEqual(f.children(node3), [node4])
343 self.assertEqual(f.children(node3), [node4])
344 self.assertEqual(f.children(node4), [])
344 self.assertEqual(f.children(node4), [])
345 self.assertEqual(f.children(node5), [])
345 self.assertEqual(f.children(node5), [])
346
346
347 class ifiledatatests(basetestcase):
347 class ifiledatatests(basetestcase):
348 """Generic tests for the ifiledata interface.
348 """Generic tests for the ifiledata interface.
349
349
350 All file storage backends for data should conform to the tests in this
350 All file storage backends for data should conform to the tests in this
351 class.
351 class.
352
352
353 Use ``makeifiledatatests()`` to create an instance of this type.
353 Use ``makeifiledatatests()`` to create an instance of this type.
354 """
354 """
355 def testempty(self):
355 def testempty(self):
356 f = self._makefilefn()
356 f = self._makefilefn()
357
357
358 self.assertEqual(f.storageinfo(), {})
358 self.assertEqual(f.storageinfo(), {})
359 self.assertEqual(f.storageinfo(revisionscount=True, trackedsize=True),
359 self.assertEqual(f.storageinfo(revisionscount=True, trackedsize=True),
360 {'revisionscount': 0, 'trackedsize': 0})
360 {'revisionscount': 0, 'trackedsize': 0})
361
361
362 self.assertEqual(f.size(nullrev), 0)
362 self.assertEqual(f.size(nullrev), 0)
363
363
364 for i in range(-5, 5):
364 for i in range(-5, 5):
365 if i == nullrev:
365 if i == nullrev:
366 continue
366 continue
367
367
368 with self.assertRaises(IndexError):
368 with self.assertRaises(IndexError):
369 f.size(i)
369 f.size(i)
370
370
371 with self.assertRaises(error.StorageError):
371 with self.assertRaises(error.StorageError):
372 f.checkhash(b'', nullid)
372 f.checkhash(b'', nullid)
373
373
374 with self.assertRaises(error.LookupError):
374 with self.assertRaises(error.LookupError):
375 f.checkhash(b'', b'\x01' * 20)
375 f.checkhash(b'', b'\x01' * 20)
376
376
377 self.assertEqual(f.revision(nullid), b'')
377 self.assertEqual(f.revision(nullid), b'')
378 self.assertEqual(f.revision(nullid, raw=True), b'')
378 self.assertEqual(f.revision(nullid, raw=True), b'')
379
379
380 with self.assertRaises(error.LookupError):
380 with self.assertRaises(error.LookupError):
381 f.revision(b'\x01' * 20)
381 f.revision(b'\x01' * 20)
382
382
383 self.assertEqual(f.read(nullid), b'')
383 self.assertEqual(f.read(nullid), b'')
384
384
385 with self.assertRaises(error.LookupError):
385 with self.assertRaises(error.LookupError):
386 f.read(b'\x01' * 20)
386 f.read(b'\x01' * 20)
387
387
388 self.assertFalse(f.renamed(nullid))
388 self.assertFalse(f.renamed(nullid))
389
389
390 with self.assertRaises(error.LookupError):
390 with self.assertRaises(error.LookupError):
391 f.read(b'\x01' * 20)
391 f.read(b'\x01' * 20)
392
392
393 self.assertTrue(f.cmp(nullid, b''))
393 self.assertTrue(f.cmp(nullid, b''))
394 self.assertTrue(f.cmp(nullid, b'foo'))
394 self.assertTrue(f.cmp(nullid, b'foo'))
395
395
396 with self.assertRaises(error.LookupError):
396 with self.assertRaises(error.LookupError):
397 f.cmp(b'\x01' * 20, b'irrelevant')
397 f.cmp(b'\x01' * 20, b'irrelevant')
398
398
399 self.assertEqual(f.revdiff(nullrev, nullrev), b'')
400
401 with self.assertRaises(IndexError):
402 f.revdiff(0, nullrev)
403
404 with self.assertRaises(IndexError):
405 f.revdiff(nullrev, 0)
406
407 with self.assertRaises(IndexError):
408 f.revdiff(0, 0)
409
410 # Emitting empty list is an empty generator.
399 # Emitting empty list is an empty generator.
411 gen = f.emitrevisions([])
400 gen = f.emitrevisions([])
412 with self.assertRaises(StopIteration):
401 with self.assertRaises(StopIteration):
413 next(gen)
402 next(gen)
414
403
415 # Emitting null node yields nothing.
404 # Emitting null node yields nothing.
416 gen = f.emitrevisions([nullid])
405 gen = f.emitrevisions([nullid])
417 with self.assertRaises(StopIteration):
406 with self.assertRaises(StopIteration):
418 next(gen)
407 next(gen)
419
408
420 # Requesting unknown node fails.
409 # Requesting unknown node fails.
421 with self.assertRaises(error.LookupError):
410 with self.assertRaises(error.LookupError):
422 list(f.emitrevisions([b'\x01' * 20]))
411 list(f.emitrevisions([b'\x01' * 20]))
423
412
424 def testsinglerevision(self):
413 def testsinglerevision(self):
425 fulltext = b'initial'
414 fulltext = b'initial'
426
415
427 f = self._makefilefn()
416 f = self._makefilefn()
428 with self._maketransactionfn() as tr:
417 with self._maketransactionfn() as tr:
429 node = f.add(fulltext, None, tr, 0, nullid, nullid)
418 node = f.add(fulltext, None, tr, 0, nullid, nullid)
430
419
431 self.assertEqual(f.storageinfo(), {})
420 self.assertEqual(f.storageinfo(), {})
432 self.assertEqual(f.storageinfo(revisionscount=True, trackedsize=True),
421 self.assertEqual(f.storageinfo(revisionscount=True, trackedsize=True),
433 {'revisionscount': 1, 'trackedsize': len(fulltext)})
422 {'revisionscount': 1, 'trackedsize': len(fulltext)})
434
423
435 self.assertEqual(f.size(0), len(fulltext))
424 self.assertEqual(f.size(0), len(fulltext))
436
425
437 with self.assertRaises(IndexError):
426 with self.assertRaises(IndexError):
438 f.size(1)
427 f.size(1)
439
428
440 f.checkhash(fulltext, node)
429 f.checkhash(fulltext, node)
441 f.checkhash(fulltext, node, nullid, nullid)
430 f.checkhash(fulltext, node, nullid, nullid)
442
431
443 with self.assertRaises(error.StorageError):
432 with self.assertRaises(error.StorageError):
444 f.checkhash(fulltext + b'extra', node)
433 f.checkhash(fulltext + b'extra', node)
445
434
446 with self.assertRaises(error.StorageError):
435 with self.assertRaises(error.StorageError):
447 f.checkhash(fulltext, node, b'\x01' * 20, nullid)
436 f.checkhash(fulltext, node, b'\x01' * 20, nullid)
448
437
449 with self.assertRaises(error.StorageError):
438 with self.assertRaises(error.StorageError):
450 f.checkhash(fulltext, node, nullid, b'\x01' * 20)
439 f.checkhash(fulltext, node, nullid, b'\x01' * 20)
451
440
452 self.assertEqual(f.revision(node), fulltext)
441 self.assertEqual(f.revision(node), fulltext)
453 self.assertEqual(f.revision(node, raw=True), fulltext)
442 self.assertEqual(f.revision(node, raw=True), fulltext)
454
443
455 self.assertEqual(f.read(node), fulltext)
444 self.assertEqual(f.read(node), fulltext)
456
445
457 self.assertFalse(f.renamed(node))
446 self.assertFalse(f.renamed(node))
458
447
459 self.assertFalse(f.cmp(node, fulltext))
448 self.assertFalse(f.cmp(node, fulltext))
460 self.assertTrue(f.cmp(node, fulltext + b'extra'))
449 self.assertTrue(f.cmp(node, fulltext + b'extra'))
461
450
462 self.assertEqual(f.revdiff(0, 0), b'')
463 self.assertEqual(f.revdiff(nullrev, 0),
464 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07%s' %
465 fulltext)
466
467 self.assertEqual(f.revdiff(0, nullrev),
468 b'\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00')
469
470 # Emitting a single revision works.
451 # Emitting a single revision works.
471 gen = f.emitrevisions([node])
452 gen = f.emitrevisions([node])
472 rev = next(gen)
453 rev = next(gen)
473
454
474 self.assertEqual(rev.node, node)
455 self.assertEqual(rev.node, node)
475 self.assertEqual(rev.p1node, nullid)
456 self.assertEqual(rev.p1node, nullid)
476 self.assertEqual(rev.p2node, nullid)
457 self.assertEqual(rev.p2node, nullid)
477 self.assertIsNone(rev.linknode)
458 self.assertIsNone(rev.linknode)
478 self.assertEqual(rev.basenode, nullid)
459 self.assertEqual(rev.basenode, nullid)
479 self.assertIsNone(rev.baserevisionsize)
460 self.assertIsNone(rev.baserevisionsize)
480 self.assertIsNone(rev.revision)
461 self.assertIsNone(rev.revision)
481 self.assertIsNone(rev.delta)
462 self.assertIsNone(rev.delta)
482
463
483 with self.assertRaises(StopIteration):
464 with self.assertRaises(StopIteration):
484 next(gen)
465 next(gen)
485
466
486 # Requesting revision data works.
467 # Requesting revision data works.
487 gen = f.emitrevisions([node], revisiondata=True)
468 gen = f.emitrevisions([node], revisiondata=True)
488 rev = next(gen)
469 rev = next(gen)
489
470
490 self.assertEqual(rev.node, node)
471 self.assertEqual(rev.node, node)
491 self.assertEqual(rev.p1node, nullid)
472 self.assertEqual(rev.p1node, nullid)
492 self.assertEqual(rev.p2node, nullid)
473 self.assertEqual(rev.p2node, nullid)
493 self.assertIsNone(rev.linknode)
474 self.assertIsNone(rev.linknode)
494 self.assertEqual(rev.basenode, nullid)
475 self.assertEqual(rev.basenode, nullid)
495 self.assertIsNone(rev.baserevisionsize)
476 self.assertIsNone(rev.baserevisionsize)
496 self.assertEqual(rev.revision, fulltext)
477 self.assertEqual(rev.revision, fulltext)
497 self.assertIsNone(rev.delta)
478 self.assertIsNone(rev.delta)
498
479
499 with self.assertRaises(StopIteration):
480 with self.assertRaises(StopIteration):
500 next(gen)
481 next(gen)
501
482
502 # Emitting an unknown node after a known revision results in error.
483 # Emitting an unknown node after a known revision results in error.
503 with self.assertRaises(error.LookupError):
484 with self.assertRaises(error.LookupError):
504 list(f.emitrevisions([node, b'\x01' * 20]))
485 list(f.emitrevisions([node, b'\x01' * 20]))
505
486
506 def testmultiplerevisions(self):
487 def testmultiplerevisions(self):
507 fulltext0 = b'x' * 1024
488 fulltext0 = b'x' * 1024
508 fulltext1 = fulltext0 + b'y'
489 fulltext1 = fulltext0 + b'y'
509 fulltext2 = b'y' + fulltext0 + b'z'
490 fulltext2 = b'y' + fulltext0 + b'z'
510
491
511 f = self._makefilefn()
492 f = self._makefilefn()
512 with self._maketransactionfn() as tr:
493 with self._maketransactionfn() as tr:
513 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
494 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
514 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
495 node1 = f.add(fulltext1, None, tr, 1, node0, nullid)
515 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
496 node2 = f.add(fulltext2, None, tr, 3, node1, nullid)
516
497
517 self.assertEqual(f.storageinfo(), {})
498 self.assertEqual(f.storageinfo(), {})
518 self.assertEqual(
499 self.assertEqual(
519 f.storageinfo(revisionscount=True, trackedsize=True),
500 f.storageinfo(revisionscount=True, trackedsize=True),
520 {
501 {
521 'revisionscount': 3,
502 'revisionscount': 3,
522 'trackedsize': len(fulltext0) + len(fulltext1) + len(fulltext2),
503 'trackedsize': len(fulltext0) + len(fulltext1) + len(fulltext2),
523 })
504 })
524
505
525 self.assertEqual(f.size(0), len(fulltext0))
506 self.assertEqual(f.size(0), len(fulltext0))
526 self.assertEqual(f.size(1), len(fulltext1))
507 self.assertEqual(f.size(1), len(fulltext1))
527 self.assertEqual(f.size(2), len(fulltext2))
508 self.assertEqual(f.size(2), len(fulltext2))
528
509
529 with self.assertRaises(IndexError):
510 with self.assertRaises(IndexError):
530 f.size(3)
511 f.size(3)
531
512
532 f.checkhash(fulltext0, node0)
513 f.checkhash(fulltext0, node0)
533 f.checkhash(fulltext1, node1)
514 f.checkhash(fulltext1, node1)
534 f.checkhash(fulltext1, node1, node0, nullid)
515 f.checkhash(fulltext1, node1, node0, nullid)
535 f.checkhash(fulltext2, node2, node1, nullid)
516 f.checkhash(fulltext2, node2, node1, nullid)
536
517
537 with self.assertRaises(error.StorageError):
518 with self.assertRaises(error.StorageError):
538 f.checkhash(fulltext1, b'\x01' * 20)
519 f.checkhash(fulltext1, b'\x01' * 20)
539
520
540 with self.assertRaises(error.StorageError):
521 with self.assertRaises(error.StorageError):
541 f.checkhash(fulltext1 + b'extra', node1, node0, nullid)
522 f.checkhash(fulltext1 + b'extra', node1, node0, nullid)
542
523
543 with self.assertRaises(error.StorageError):
524 with self.assertRaises(error.StorageError):
544 f.checkhash(fulltext1, node1, node0, node0)
525 f.checkhash(fulltext1, node1, node0, node0)
545
526
546 self.assertEqual(f.revision(node0), fulltext0)
527 self.assertEqual(f.revision(node0), fulltext0)
547 self.assertEqual(f.revision(node0, raw=True), fulltext0)
528 self.assertEqual(f.revision(node0, raw=True), fulltext0)
548 self.assertEqual(f.revision(node1), fulltext1)
529 self.assertEqual(f.revision(node1), fulltext1)
549 self.assertEqual(f.revision(node1, raw=True), fulltext1)
530 self.assertEqual(f.revision(node1, raw=True), fulltext1)
550 self.assertEqual(f.revision(node2), fulltext2)
531 self.assertEqual(f.revision(node2), fulltext2)
551 self.assertEqual(f.revision(node2, raw=True), fulltext2)
532 self.assertEqual(f.revision(node2, raw=True), fulltext2)
552
533
553 with self.assertRaises(error.LookupError):
534 with self.assertRaises(error.LookupError):
554 f.revision(b'\x01' * 20)
535 f.revision(b'\x01' * 20)
555
536
556 self.assertEqual(f.read(node0), fulltext0)
537 self.assertEqual(f.read(node0), fulltext0)
557 self.assertEqual(f.read(node1), fulltext1)
538 self.assertEqual(f.read(node1), fulltext1)
558 self.assertEqual(f.read(node2), fulltext2)
539 self.assertEqual(f.read(node2), fulltext2)
559
540
560 with self.assertRaises(error.LookupError):
541 with self.assertRaises(error.LookupError):
561 f.read(b'\x01' * 20)
542 f.read(b'\x01' * 20)
562
543
563 self.assertFalse(f.renamed(node0))
544 self.assertFalse(f.renamed(node0))
564 self.assertFalse(f.renamed(node1))
545 self.assertFalse(f.renamed(node1))
565 self.assertFalse(f.renamed(node2))
546 self.assertFalse(f.renamed(node2))
566
547
567 with self.assertRaises(error.LookupError):
548 with self.assertRaises(error.LookupError):
568 f.renamed(b'\x01' * 20)
549 f.renamed(b'\x01' * 20)
569
550
570 self.assertFalse(f.cmp(node0, fulltext0))
551 self.assertFalse(f.cmp(node0, fulltext0))
571 self.assertFalse(f.cmp(node1, fulltext1))
552 self.assertFalse(f.cmp(node1, fulltext1))
572 self.assertFalse(f.cmp(node2, fulltext2))
553 self.assertFalse(f.cmp(node2, fulltext2))
573
554
574 self.assertTrue(f.cmp(node1, fulltext0))
555 self.assertTrue(f.cmp(node1, fulltext0))
575 self.assertTrue(f.cmp(node2, fulltext1))
556 self.assertTrue(f.cmp(node2, fulltext1))
576
557
577 with self.assertRaises(error.LookupError):
558 with self.assertRaises(error.LookupError):
578 f.cmp(b'\x01' * 20, b'irrelevant')
559 f.cmp(b'\x01' * 20, b'irrelevant')
579
560
580 self.assertEqual(f.revdiff(0, 1),
581 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
582 fulltext1)
583
584 self.assertEqual(f.revdiff(0, 2),
585 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x02' +
586 fulltext2)
587
588 # Nodes should be emitted in order.
561 # Nodes should be emitted in order.
589 gen = f.emitrevisions([node0, node1, node2], revisiondata=True)
562 gen = f.emitrevisions([node0, node1, node2], revisiondata=True)
590
563
591 rev = next(gen)
564 rev = next(gen)
592
565
593 self.assertEqual(rev.node, node0)
566 self.assertEqual(rev.node, node0)
594 self.assertEqual(rev.p1node, nullid)
567 self.assertEqual(rev.p1node, nullid)
595 self.assertEqual(rev.p2node, nullid)
568 self.assertEqual(rev.p2node, nullid)
596 self.assertIsNone(rev.linknode)
569 self.assertIsNone(rev.linknode)
597 self.assertEqual(rev.basenode, nullid)
570 self.assertEqual(rev.basenode, nullid)
598 self.assertIsNone(rev.baserevisionsize)
571 self.assertIsNone(rev.baserevisionsize)
599 self.assertEqual(rev.revision, fulltext0)
572 self.assertEqual(rev.revision, fulltext0)
600 self.assertIsNone(rev.delta)
573 self.assertIsNone(rev.delta)
601
574
602 rev = next(gen)
575 rev = next(gen)
603
576
604 self.assertEqual(rev.node, node1)
577 self.assertEqual(rev.node, node1)
605 self.assertEqual(rev.p1node, node0)
578 self.assertEqual(rev.p1node, node0)
606 self.assertEqual(rev.p2node, nullid)
579 self.assertEqual(rev.p2node, nullid)
607 self.assertIsNone(rev.linknode)
580 self.assertIsNone(rev.linknode)
608 self.assertEqual(rev.basenode, node0)
581 self.assertEqual(rev.basenode, node0)
609 self.assertIsNone(rev.baserevisionsize)
582 self.assertIsNone(rev.baserevisionsize)
610 self.assertIsNone(rev.revision)
583 self.assertIsNone(rev.revision)
611 self.assertEqual(rev.delta,
584 self.assertEqual(rev.delta,
612 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
585 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
613 fulltext1)
586 fulltext1)
614
587
615 rev = next(gen)
588 rev = next(gen)
616
589
617 self.assertEqual(rev.node, node2)
590 self.assertEqual(rev.node, node2)
618 self.assertEqual(rev.p1node, node1)
591 self.assertEqual(rev.p1node, node1)
619 self.assertEqual(rev.p2node, nullid)
592 self.assertEqual(rev.p2node, nullid)
620 self.assertIsNone(rev.linknode)
593 self.assertIsNone(rev.linknode)
621 self.assertEqual(rev.basenode, node1)
594 self.assertEqual(rev.basenode, node1)
622 self.assertIsNone(rev.baserevisionsize)
595 self.assertIsNone(rev.baserevisionsize)
623 self.assertIsNone(rev.revision)
596 self.assertIsNone(rev.revision)
624 self.assertEqual(rev.delta,
597 self.assertEqual(rev.delta,
625 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
598 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
626 fulltext2)
599 fulltext2)
627
600
628 with self.assertRaises(StopIteration):
601 with self.assertRaises(StopIteration):
629 next(gen)
602 next(gen)
630
603
631 # Request not in DAG order is reordered to be in DAG order.
604 # Request not in DAG order is reordered to be in DAG order.
632 gen = f.emitrevisions([node2, node1, node0], revisiondata=True)
605 gen = f.emitrevisions([node2, node1, node0], revisiondata=True)
633
606
634 rev = next(gen)
607 rev = next(gen)
635
608
636 self.assertEqual(rev.node, node0)
609 self.assertEqual(rev.node, node0)
637 self.assertEqual(rev.p1node, nullid)
610 self.assertEqual(rev.p1node, nullid)
638 self.assertEqual(rev.p2node, nullid)
611 self.assertEqual(rev.p2node, nullid)
639 self.assertIsNone(rev.linknode)
612 self.assertIsNone(rev.linknode)
640 self.assertEqual(rev.basenode, nullid)
613 self.assertEqual(rev.basenode, nullid)
641 self.assertIsNone(rev.baserevisionsize)
614 self.assertIsNone(rev.baserevisionsize)
642 self.assertEqual(rev.revision, fulltext0)
615 self.assertEqual(rev.revision, fulltext0)
643 self.assertIsNone(rev.delta)
616 self.assertIsNone(rev.delta)
644
617
645 rev = next(gen)
618 rev = next(gen)
646
619
647 self.assertEqual(rev.node, node1)
620 self.assertEqual(rev.node, node1)
648 self.assertEqual(rev.p1node, node0)
621 self.assertEqual(rev.p1node, node0)
649 self.assertEqual(rev.p2node, nullid)
622 self.assertEqual(rev.p2node, nullid)
650 self.assertIsNone(rev.linknode)
623 self.assertIsNone(rev.linknode)
651 self.assertEqual(rev.basenode, node0)
624 self.assertEqual(rev.basenode, node0)
652 self.assertIsNone(rev.baserevisionsize)
625 self.assertIsNone(rev.baserevisionsize)
653 self.assertIsNone(rev.revision)
626 self.assertIsNone(rev.revision)
654 self.assertEqual(rev.delta,
627 self.assertEqual(rev.delta,
655 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
628 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
656 fulltext1)
629 fulltext1)
657
630
658 rev = next(gen)
631 rev = next(gen)
659
632
660 self.assertEqual(rev.node, node2)
633 self.assertEqual(rev.node, node2)
661 self.assertEqual(rev.p1node, node1)
634 self.assertEqual(rev.p1node, node1)
662 self.assertEqual(rev.p2node, nullid)
635 self.assertEqual(rev.p2node, nullid)
663 self.assertIsNone(rev.linknode)
636 self.assertIsNone(rev.linknode)
664 self.assertEqual(rev.basenode, node1)
637 self.assertEqual(rev.basenode, node1)
665 self.assertIsNone(rev.baserevisionsize)
638 self.assertIsNone(rev.baserevisionsize)
666 self.assertIsNone(rev.revision)
639 self.assertIsNone(rev.revision)
667 self.assertEqual(rev.delta,
640 self.assertEqual(rev.delta,
668 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
641 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
669 fulltext2)
642 fulltext2)
670
643
671 with self.assertRaises(StopIteration):
644 with self.assertRaises(StopIteration):
672 next(gen)
645 next(gen)
673
646
674 # Unrecognized nodesorder value raises ProgrammingError.
647 # Unrecognized nodesorder value raises ProgrammingError.
675 with self.assertRaises(error.ProgrammingError):
648 with self.assertRaises(error.ProgrammingError):
676 list(f.emitrevisions([], nodesorder='bad'))
649 list(f.emitrevisions([], nodesorder='bad'))
677
650
678 # nodesorder=storage is recognized. But we can't test it thoroughly
651 # nodesorder=storage is recognized. But we can't test it thoroughly
679 # because behavior is storage-dependent.
652 # because behavior is storage-dependent.
680 res = list(f.emitrevisions([node2, node1, node0],
653 res = list(f.emitrevisions([node2, node1, node0],
681 nodesorder='storage'))
654 nodesorder='storage'))
682 self.assertEqual(len(res), 3)
655 self.assertEqual(len(res), 3)
683 self.assertEqual({o.node for o in res}, {node0, node1, node2})
656 self.assertEqual({o.node for o in res}, {node0, node1, node2})
684
657
685 # nodesorder=nodes forces the order.
658 # nodesorder=nodes forces the order.
686 gen = f.emitrevisions([node2, node0], nodesorder='nodes',
659 gen = f.emitrevisions([node2, node0], nodesorder='nodes',
687 revisiondata=True)
660 revisiondata=True)
688
661
689 rev = next(gen)
662 rev = next(gen)
690 self.assertEqual(rev.node, node2)
663 self.assertEqual(rev.node, node2)
691 self.assertEqual(rev.p1node, node1)
664 self.assertEqual(rev.p1node, node1)
692 self.assertEqual(rev.p2node, nullid)
665 self.assertEqual(rev.p2node, nullid)
693 self.assertEqual(rev.basenode, nullid)
666 self.assertEqual(rev.basenode, nullid)
694 self.assertIsNone(rev.baserevisionsize)
667 self.assertIsNone(rev.baserevisionsize)
695 self.assertEqual(rev.revision, fulltext2)
668 self.assertEqual(rev.revision, fulltext2)
696 self.assertIsNone(rev.delta)
669 self.assertIsNone(rev.delta)
697
670
698 rev = next(gen)
671 rev = next(gen)
699 self.assertEqual(rev.node, node0)
672 self.assertEqual(rev.node, node0)
700 self.assertEqual(rev.p1node, nullid)
673 self.assertEqual(rev.p1node, nullid)
701 self.assertEqual(rev.p2node, nullid)
674 self.assertEqual(rev.p2node, nullid)
702 # Delta behavior is storage dependent, so we can't easily test it.
675 # Delta behavior is storage dependent, so we can't easily test it.
703
676
704 with self.assertRaises(StopIteration):
677 with self.assertRaises(StopIteration):
705 next(gen)
678 next(gen)
706
679
707 # assumehaveparentrevisions=False (the default) won't send a delta for
680 # assumehaveparentrevisions=False (the default) won't send a delta for
708 # the first revision.
681 # the first revision.
709 gen = f.emitrevisions({node2, node1}, revisiondata=True)
682 gen = f.emitrevisions({node2, node1}, revisiondata=True)
710
683
711 rev = next(gen)
684 rev = next(gen)
712 self.assertEqual(rev.node, node1)
685 self.assertEqual(rev.node, node1)
713 self.assertEqual(rev.p1node, node0)
686 self.assertEqual(rev.p1node, node0)
714 self.assertEqual(rev.p2node, nullid)
687 self.assertEqual(rev.p2node, nullid)
715 self.assertEqual(rev.basenode, nullid)
688 self.assertEqual(rev.basenode, nullid)
716 self.assertIsNone(rev.baserevisionsize)
689 self.assertIsNone(rev.baserevisionsize)
717 self.assertEqual(rev.revision, fulltext1)
690 self.assertEqual(rev.revision, fulltext1)
718 self.assertIsNone(rev.delta)
691 self.assertIsNone(rev.delta)
719
692
720 rev = next(gen)
693 rev = next(gen)
721 self.assertEqual(rev.node, node2)
694 self.assertEqual(rev.node, node2)
722 self.assertEqual(rev.p1node, node1)
695 self.assertEqual(rev.p1node, node1)
723 self.assertEqual(rev.p2node, nullid)
696 self.assertEqual(rev.p2node, nullid)
724 self.assertEqual(rev.basenode, node1)
697 self.assertEqual(rev.basenode, node1)
725 self.assertIsNone(rev.baserevisionsize)
698 self.assertIsNone(rev.baserevisionsize)
726 self.assertIsNone(rev.revision)
699 self.assertIsNone(rev.revision)
727 self.assertEqual(rev.delta,
700 self.assertEqual(rev.delta,
728 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
701 b'\x00\x00\x00\x00\x00\x00\x04\x01\x00\x00\x04\x02' +
729 fulltext2)
702 fulltext2)
730
703
731 with self.assertRaises(StopIteration):
704 with self.assertRaises(StopIteration):
732 next(gen)
705 next(gen)
733
706
734 # assumehaveparentrevisions=True allows delta against initial revision.
707 # assumehaveparentrevisions=True allows delta against initial revision.
735 gen = f.emitrevisions([node2, node1],
708 gen = f.emitrevisions([node2, node1],
736 revisiondata=True, assumehaveparentrevisions=True)
709 revisiondata=True, assumehaveparentrevisions=True)
737
710
738 rev = next(gen)
711 rev = next(gen)
739 self.assertEqual(rev.node, node1)
712 self.assertEqual(rev.node, node1)
740 self.assertEqual(rev.p1node, node0)
713 self.assertEqual(rev.p1node, node0)
741 self.assertEqual(rev.p2node, nullid)
714 self.assertEqual(rev.p2node, nullid)
742 self.assertEqual(rev.basenode, node0)
715 self.assertEqual(rev.basenode, node0)
743 self.assertIsNone(rev.baserevisionsize)
716 self.assertIsNone(rev.baserevisionsize)
744 self.assertIsNone(rev.revision)
717 self.assertIsNone(rev.revision)
745 self.assertEqual(rev.delta,
718 self.assertEqual(rev.delta,
746 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
719 b'\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x04\x01' +
747 fulltext1)
720 fulltext1)
748
721
749 # forceprevious=True forces a delta against the previous revision.
722 # forceprevious=True forces a delta against the previous revision.
750 # Special case for initial revision.
723 # Special case for initial revision.
751 gen = f.emitrevisions([node0], revisiondata=True, deltaprevious=True)
724 gen = f.emitrevisions([node0], revisiondata=True, deltaprevious=True)
752
725
753 rev = next(gen)
726 rev = next(gen)
754 self.assertEqual(rev.node, node0)
727 self.assertEqual(rev.node, node0)
755 self.assertEqual(rev.p1node, nullid)
728 self.assertEqual(rev.p1node, nullid)
756 self.assertEqual(rev.p2node, nullid)
729 self.assertEqual(rev.p2node, nullid)
757 self.assertEqual(rev.basenode, nullid)
730 self.assertEqual(rev.basenode, nullid)
758 self.assertIsNone(rev.baserevisionsize)
731 self.assertIsNone(rev.baserevisionsize)
759 self.assertIsNone(rev.revision)
732 self.assertIsNone(rev.revision)
760 self.assertEqual(rev.delta,
733 self.assertEqual(rev.delta,
761 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' +
734 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' +
762 fulltext0)
735 fulltext0)
763
736
764 with self.assertRaises(StopIteration):
737 with self.assertRaises(StopIteration):
765 next(gen)
738 next(gen)
766
739
767 gen = f.emitrevisions([node0, node2], revisiondata=True,
740 gen = f.emitrevisions([node0, node2], revisiondata=True,
768 deltaprevious=True)
741 deltaprevious=True)
769
742
770 rev = next(gen)
743 rev = next(gen)
771 self.assertEqual(rev.node, node0)
744 self.assertEqual(rev.node, node0)
772 self.assertEqual(rev.p1node, nullid)
745 self.assertEqual(rev.p1node, nullid)
773 self.assertEqual(rev.p2node, nullid)
746 self.assertEqual(rev.p2node, nullid)
774 self.assertEqual(rev.basenode, nullid)
747 self.assertEqual(rev.basenode, nullid)
775 self.assertIsNone(rev.baserevisionsize)
748 self.assertIsNone(rev.baserevisionsize)
776 self.assertIsNone(rev.revision)
749 self.assertIsNone(rev.revision)
777 self.assertEqual(rev.delta,
750 self.assertEqual(rev.delta,
778 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' +
751 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00' +
779 fulltext0)
752 fulltext0)
780
753
781 rev = next(gen)
754 rev = next(gen)
782 self.assertEqual(rev.node, node2)
755 self.assertEqual(rev.node, node2)
783 self.assertEqual(rev.p1node, node1)
756 self.assertEqual(rev.p1node, node1)
784 self.assertEqual(rev.p2node, nullid)
757 self.assertEqual(rev.p2node, nullid)
785 self.assertEqual(rev.basenode, node0)
758 self.assertEqual(rev.basenode, node0)
786
759
787 with self.assertRaises(StopIteration):
760 with self.assertRaises(StopIteration):
788 next(gen)
761 next(gen)
789
762
790 def testrenamed(self):
763 def testrenamed(self):
791 fulltext0 = b'foo'
764 fulltext0 = b'foo'
792 fulltext1 = b'bar'
765 fulltext1 = b'bar'
793 fulltext2 = b'baz'
766 fulltext2 = b'baz'
794
767
795 meta1 = {
768 meta1 = {
796 b'copy': b'source0',
769 b'copy': b'source0',
797 b'copyrev': b'a' * 40,
770 b'copyrev': b'a' * 40,
798 }
771 }
799
772
800 meta2 = {
773 meta2 = {
801 b'copy': b'source1',
774 b'copy': b'source1',
802 b'copyrev': b'b' * 40,
775 b'copyrev': b'b' * 40,
803 }
776 }
804
777
805 stored1 = b''.join([
778 stored1 = b''.join([
806 b'\x01\ncopy: source0\n',
779 b'\x01\ncopy: source0\n',
807 b'copyrev: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n\x01\n',
780 b'copyrev: aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\n\x01\n',
808 fulltext1,
781 fulltext1,
809 ])
782 ])
810
783
811 stored2 = b''.join([
784 stored2 = b''.join([
812 b'\x01\ncopy: source1\n',
785 b'\x01\ncopy: source1\n',
813 b'copyrev: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n\x01\n',
786 b'copyrev: bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\n\x01\n',
814 fulltext2,
787 fulltext2,
815 ])
788 ])
816
789
817 f = self._makefilefn()
790 f = self._makefilefn()
818 with self._maketransactionfn() as tr:
791 with self._maketransactionfn() as tr:
819 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
792 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
820 node1 = f.add(fulltext1, meta1, tr, 1, node0, nullid)
793 node1 = f.add(fulltext1, meta1, tr, 1, node0, nullid)
821 node2 = f.add(fulltext2, meta2, tr, 2, nullid, nullid)
794 node2 = f.add(fulltext2, meta2, tr, 2, nullid, nullid)
822
795
823 # Metadata header isn't recognized when parent isn't nullid.
796 # Metadata header isn't recognized when parent isn't nullid.
824 self.assertEqual(f.size(1), len(stored1))
797 self.assertEqual(f.size(1), len(stored1))
825 self.assertEqual(f.size(2), len(fulltext2))
798 self.assertEqual(f.size(2), len(fulltext2))
826
799
827 self.assertEqual(f.revision(node1), stored1)
800 self.assertEqual(f.revision(node1), stored1)
828 self.assertEqual(f.revision(node1, raw=True), stored1)
801 self.assertEqual(f.revision(node1, raw=True), stored1)
829 self.assertEqual(f.revision(node2), stored2)
802 self.assertEqual(f.revision(node2), stored2)
830 self.assertEqual(f.revision(node2, raw=True), stored2)
803 self.assertEqual(f.revision(node2, raw=True), stored2)
831
804
832 self.assertEqual(f.read(node1), fulltext1)
805 self.assertEqual(f.read(node1), fulltext1)
833 self.assertEqual(f.read(node2), fulltext2)
806 self.assertEqual(f.read(node2), fulltext2)
834
807
835 # Returns False when first parent is set.
808 # Returns False when first parent is set.
836 self.assertFalse(f.renamed(node1))
809 self.assertFalse(f.renamed(node1))
837 self.assertEqual(f.renamed(node2), (b'source1', b'\xbb' * 20))
810 self.assertEqual(f.renamed(node2), (b'source1', b'\xbb' * 20))
838
811
839 self.assertTrue(f.cmp(node1, fulltext1))
812 self.assertTrue(f.cmp(node1, fulltext1))
840 self.assertTrue(f.cmp(node1, stored1))
813 self.assertTrue(f.cmp(node1, stored1))
841 self.assertFalse(f.cmp(node2, fulltext2))
814 self.assertFalse(f.cmp(node2, fulltext2))
842 self.assertTrue(f.cmp(node2, stored2))
815 self.assertTrue(f.cmp(node2, stored2))
843
816
844 def testmetadataprefix(self):
817 def testmetadataprefix(self):
845 # Content with metadata prefix has extra prefix inserted in storage.
818 # Content with metadata prefix has extra prefix inserted in storage.
846 fulltext0 = b'\x01\nfoo'
819 fulltext0 = b'\x01\nfoo'
847 stored0 = b'\x01\n\x01\n\x01\nfoo'
820 stored0 = b'\x01\n\x01\n\x01\nfoo'
848
821
849 fulltext1 = b'\x01\nbar'
822 fulltext1 = b'\x01\nbar'
850 meta1 = {
823 meta1 = {
851 b'copy': b'source0',
824 b'copy': b'source0',
852 b'copyrev': b'b' * 40,
825 b'copyrev': b'b' * 40,
853 }
826 }
854 stored1 = b''.join([
827 stored1 = b''.join([
855 b'\x01\ncopy: source0\n',
828 b'\x01\ncopy: source0\n',
856 b'copyrev: %s\n' % (b'b' * 40),
829 b'copyrev: %s\n' % (b'b' * 40),
857 b'\x01\n\x01\nbar',
830 b'\x01\n\x01\nbar',
858 ])
831 ])
859
832
860 f = self._makefilefn()
833 f = self._makefilefn()
861 with self._maketransactionfn() as tr:
834 with self._maketransactionfn() as tr:
862 node0 = f.add(fulltext0, {}, tr, 0, nullid, nullid)
835 node0 = f.add(fulltext0, {}, tr, 0, nullid, nullid)
863 node1 = f.add(fulltext1, meta1, tr, 1, nullid, nullid)
836 node1 = f.add(fulltext1, meta1, tr, 1, nullid, nullid)
864
837
865 # TODO this is buggy.
838 # TODO this is buggy.
866 self.assertEqual(f.size(0), len(fulltext0) + 4)
839 self.assertEqual(f.size(0), len(fulltext0) + 4)
867
840
868 self.assertEqual(f.size(1), len(fulltext1))
841 self.assertEqual(f.size(1), len(fulltext1))
869
842
870 self.assertEqual(f.revision(node0), stored0)
843 self.assertEqual(f.revision(node0), stored0)
871 self.assertEqual(f.revision(node0, raw=True), stored0)
844 self.assertEqual(f.revision(node0, raw=True), stored0)
872
845
873 self.assertEqual(f.revision(node1), stored1)
846 self.assertEqual(f.revision(node1), stored1)
874 self.assertEqual(f.revision(node1, raw=True), stored1)
847 self.assertEqual(f.revision(node1, raw=True), stored1)
875
848
876 self.assertEqual(f.read(node0), fulltext0)
849 self.assertEqual(f.read(node0), fulltext0)
877 self.assertEqual(f.read(node1), fulltext1)
850 self.assertEqual(f.read(node1), fulltext1)
878
851
879 self.assertFalse(f.cmp(node0, fulltext0))
852 self.assertFalse(f.cmp(node0, fulltext0))
880 self.assertTrue(f.cmp(node0, stored0))
853 self.assertTrue(f.cmp(node0, stored0))
881
854
882 self.assertFalse(f.cmp(node1, fulltext1))
855 self.assertFalse(f.cmp(node1, fulltext1))
883 self.assertTrue(f.cmp(node1, stored0))
856 self.assertTrue(f.cmp(node1, stored0))
884
857
885 def testcensored(self):
858 def testcensored(self):
886 f = self._makefilefn()
859 f = self._makefilefn()
887
860
888 stored1 = storageutil.packmeta({
861 stored1 = storageutil.packmeta({
889 b'censored': b'tombstone',
862 b'censored': b'tombstone',
890 }, b'')
863 }, b'')
891
864
892 # TODO tests are incomplete because we need the node to be
865 # TODO tests are incomplete because we need the node to be
893 # different due to presence of censor metadata. But we can't
866 # different due to presence of censor metadata. But we can't
894 # do this with addrevision().
867 # do this with addrevision().
895 with self._maketransactionfn() as tr:
868 with self._maketransactionfn() as tr:
896 node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
869 node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
897 f.addrevision(stored1, tr, 1, node0, nullid,
870 f.addrevision(stored1, tr, 1, node0, nullid,
898 flags=revlog.REVIDX_ISCENSORED)
871 flags=revlog.REVIDX_ISCENSORED)
899
872
900 self.assertTrue(f.iscensored(1))
873 self.assertTrue(f.iscensored(1))
901
874
902 self.assertEqual(f.revision(1), stored1)
875 self.assertEqual(f.revision(1), stored1)
903 self.assertEqual(f.revision(1, raw=True), stored1)
876 self.assertEqual(f.revision(1, raw=True), stored1)
904
877
905 self.assertEqual(f.read(1), b'')
878 self.assertEqual(f.read(1), b'')
906
879
907 class ifilemutationtests(basetestcase):
880 class ifilemutationtests(basetestcase):
908 """Generic tests for the ifilemutation interface.
881 """Generic tests for the ifilemutation interface.
909
882
910 All file storage backends that support writing should conform to this
883 All file storage backends that support writing should conform to this
911 interface.
884 interface.
912
885
913 Use ``makeifilemutationtests()`` to create an instance of this type.
886 Use ``makeifilemutationtests()`` to create an instance of this type.
914 """
887 """
915 def testaddnoop(self):
888 def testaddnoop(self):
916 f = self._makefilefn()
889 f = self._makefilefn()
917 with self._maketransactionfn() as tr:
890 with self._maketransactionfn() as tr:
918 node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
891 node0 = f.add(b'foo', None, tr, 0, nullid, nullid)
919 node1 = f.add(b'foo', None, tr, 0, nullid, nullid)
892 node1 = f.add(b'foo', None, tr, 0, nullid, nullid)
920 # Varying by linkrev shouldn't impact hash.
893 # Varying by linkrev shouldn't impact hash.
921 node2 = f.add(b'foo', None, tr, 1, nullid, nullid)
894 node2 = f.add(b'foo', None, tr, 1, nullid, nullid)
922
895
923 self.assertEqual(node1, node0)
896 self.assertEqual(node1, node0)
924 self.assertEqual(node2, node0)
897 self.assertEqual(node2, node0)
925 self.assertEqual(len(f), 1)
898 self.assertEqual(len(f), 1)
926
899
927 def testaddrevisionbadnode(self):
900 def testaddrevisionbadnode(self):
928 f = self._makefilefn()
901 f = self._makefilefn()
929 with self._maketransactionfn() as tr:
902 with self._maketransactionfn() as tr:
930 # Adding a revision with bad node value fails.
903 # Adding a revision with bad node value fails.
931 with self.assertRaises(error.StorageError):
904 with self.assertRaises(error.StorageError):
932 f.addrevision(b'foo', tr, 0, nullid, nullid, node=b'\x01' * 20)
905 f.addrevision(b'foo', tr, 0, nullid, nullid, node=b'\x01' * 20)
933
906
934 def testaddrevisionunknownflag(self):
907 def testaddrevisionunknownflag(self):
935 f = self._makefilefn()
908 f = self._makefilefn()
936 with self._maketransactionfn() as tr:
909 with self._maketransactionfn() as tr:
937 for i in range(15, 0, -1):
910 for i in range(15, 0, -1):
938 if (1 << i) & ~revlog.REVIDX_KNOWN_FLAGS:
911 if (1 << i) & ~revlog.REVIDX_KNOWN_FLAGS:
939 flags = 1 << i
912 flags = 1 << i
940 break
913 break
941
914
942 with self.assertRaises(error.StorageError):
915 with self.assertRaises(error.StorageError):
943 f.addrevision(b'foo', tr, 0, nullid, nullid, flags=flags)
916 f.addrevision(b'foo', tr, 0, nullid, nullid, flags=flags)
944
917
945 def testaddgroupsimple(self):
918 def testaddgroupsimple(self):
946 f = self._makefilefn()
919 f = self._makefilefn()
947
920
948 callbackargs = []
921 callbackargs = []
949 def cb(*args, **kwargs):
922 def cb(*args, **kwargs):
950 callbackargs.append((args, kwargs))
923 callbackargs.append((args, kwargs))
951
924
952 def linkmapper(node):
925 def linkmapper(node):
953 return 0
926 return 0
954
927
955 with self._maketransactionfn() as tr:
928 with self._maketransactionfn() as tr:
956 nodes = f.addgroup([], None, tr, addrevisioncb=cb)
929 nodes = f.addgroup([], None, tr, addrevisioncb=cb)
957
930
958 self.assertEqual(nodes, [])
931 self.assertEqual(nodes, [])
959 self.assertEqual(callbackargs, [])
932 self.assertEqual(callbackargs, [])
960 self.assertEqual(len(f), 0)
933 self.assertEqual(len(f), 0)
961
934
962 fulltext0 = b'foo'
935 fulltext0 = b'foo'
963 delta0 = mdiff.trivialdiffheader(len(fulltext0)) + fulltext0
936 delta0 = mdiff.trivialdiffheader(len(fulltext0)) + fulltext0
964
937
965 deltas = [
938 deltas = [
966 (b'\x01' * 20, nullid, nullid, nullid, nullid, delta0, 0),
939 (b'\x01' * 20, nullid, nullid, nullid, nullid, delta0, 0),
967 ]
940 ]
968
941
969 with self._maketransactionfn() as tr:
942 with self._maketransactionfn() as tr:
970 with self.assertRaises(error.StorageError):
943 with self.assertRaises(error.StorageError):
971 f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)
944 f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)
972
945
973 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
946 node0 = f.add(fulltext0, None, tr, 0, nullid, nullid)
974
947
975 f = self._makefilefn()
948 f = self._makefilefn()
976
949
977 deltas = [
950 deltas = [
978 (node0, nullid, nullid, nullid, nullid, delta0, 0),
951 (node0, nullid, nullid, nullid, nullid, delta0, 0),
979 ]
952 ]
980
953
981 with self._maketransactionfn() as tr:
954 with self._maketransactionfn() as tr:
982 nodes = f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)
955 nodes = f.addgroup(deltas, linkmapper, tr, addrevisioncb=cb)
983
956
984 self.assertEqual(nodes, [
957 self.assertEqual(nodes, [
985 b'\x49\xd8\xcb\xb1\x5c\xe2\x57\x92\x04\x47'
958 b'\x49\xd8\xcb\xb1\x5c\xe2\x57\x92\x04\x47'
986 b'\x00\x6b\x46\x97\x8b\x7a\xf9\x80\xa9\x79'])
959 b'\x00\x6b\x46\x97\x8b\x7a\xf9\x80\xa9\x79'])
987
960
988 self.assertEqual(len(callbackargs), 1)
961 self.assertEqual(len(callbackargs), 1)
989 self.assertEqual(callbackargs[0][0][1], nodes[0])
962 self.assertEqual(callbackargs[0][0][1], nodes[0])
990
963
991 self.assertEqual(list(f.revs()), [0])
964 self.assertEqual(list(f.revs()), [0])
992 self.assertEqual(f.rev(nodes[0]), 0)
965 self.assertEqual(f.rev(nodes[0]), 0)
993 self.assertEqual(f.node(0), nodes[0])
966 self.assertEqual(f.node(0), nodes[0])
994
967
995 def testaddgroupmultiple(self):
968 def testaddgroupmultiple(self):
996 f = self._makefilefn()
969 f = self._makefilefn()
997
970
998 fulltexts = [
971 fulltexts = [
999 b'foo',
972 b'foo',
1000 b'bar',
973 b'bar',
1001 b'x' * 1024,
974 b'x' * 1024,
1002 ]
975 ]
1003
976
1004 nodes = []
977 nodes = []
1005 with self._maketransactionfn() as tr:
978 with self._maketransactionfn() as tr:
1006 for fulltext in fulltexts:
979 for fulltext in fulltexts:
1007 nodes.append(f.add(fulltext, None, tr, 0, nullid, nullid))
980 nodes.append(f.add(fulltext, None, tr, 0, nullid, nullid))
1008
981
1009 f = self._makefilefn()
982 f = self._makefilefn()
1010 deltas = []
983 deltas = []
1011 for i, fulltext in enumerate(fulltexts):
984 for i, fulltext in enumerate(fulltexts):
1012 delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext
985 delta = mdiff.trivialdiffheader(len(fulltext)) + fulltext
1013
986
1014 deltas.append((nodes[i], nullid, nullid, nullid, nullid, delta, 0))
987 deltas.append((nodes[i], nullid, nullid, nullid, nullid, delta, 0))
1015
988
1016 with self._maketransactionfn() as tr:
989 with self._maketransactionfn() as tr:
1017 self.assertEqual(f.addgroup(deltas, lambda x: 0, tr), nodes)
990 self.assertEqual(f.addgroup(deltas, lambda x: 0, tr), nodes)
1018
991
1019 self.assertEqual(len(f), len(deltas))
992 self.assertEqual(len(f), len(deltas))
1020 self.assertEqual(list(f.revs()), [0, 1, 2])
993 self.assertEqual(list(f.revs()), [0, 1, 2])
1021 self.assertEqual(f.rev(nodes[0]), 0)
994 self.assertEqual(f.rev(nodes[0]), 0)
1022 self.assertEqual(f.rev(nodes[1]), 1)
995 self.assertEqual(f.rev(nodes[1]), 1)
1023 self.assertEqual(f.rev(nodes[2]), 2)
996 self.assertEqual(f.rev(nodes[2]), 2)
1024 self.assertEqual(f.node(0), nodes[0])
997 self.assertEqual(f.node(0), nodes[0])
1025 self.assertEqual(f.node(1), nodes[1])
998 self.assertEqual(f.node(1), nodes[1])
1026 self.assertEqual(f.node(2), nodes[2])
999 self.assertEqual(f.node(2), nodes[2])
1027
1000
1028 def makeifileindextests(makefilefn, maketransactionfn):
1001 def makeifileindextests(makefilefn, maketransactionfn):
1029 """Create a unittest.TestCase class suitable for testing file storage.
1002 """Create a unittest.TestCase class suitable for testing file storage.
1030
1003
1031 ``makefilefn`` is a callable which receives the test case as an
1004 ``makefilefn`` is a callable which receives the test case as an
1032 argument and returns an object implementing the ``ifilestorage`` interface.
1005 argument and returns an object implementing the ``ifilestorage`` interface.
1033
1006
1034 ``maketransactionfn`` is a callable which receives the test case as an
1007 ``maketransactionfn`` is a callable which receives the test case as an
1035 argument and returns a transaction object.
1008 argument and returns a transaction object.
1036
1009
1037 Returns a type that is a ``unittest.TestCase`` that can be used for
1010 Returns a type that is a ``unittest.TestCase`` that can be used for
1038 testing the object implementing the file storage interface. Simply
1011 testing the object implementing the file storage interface. Simply
1039 assign the returned value to a module-level attribute and a test loader
1012 assign the returned value to a module-level attribute and a test loader
1040 should find and run it automatically.
1013 should find and run it automatically.
1041 """
1014 """
1042 d = {
1015 d = {
1043 r'_makefilefn': makefilefn,
1016 r'_makefilefn': makefilefn,
1044 r'_maketransactionfn': maketransactionfn,
1017 r'_maketransactionfn': maketransactionfn,
1045 }
1018 }
1046 return type(r'ifileindextests', (ifileindextests,), d)
1019 return type(r'ifileindextests', (ifileindextests,), d)
1047
1020
1048 def makeifiledatatests(makefilefn, maketransactionfn):
1021 def makeifiledatatests(makefilefn, maketransactionfn):
1049 d = {
1022 d = {
1050 r'_makefilefn': makefilefn,
1023 r'_makefilefn': makefilefn,
1051 r'_maketransactionfn': maketransactionfn,
1024 r'_maketransactionfn': maketransactionfn,
1052 }
1025 }
1053 return type(r'ifiledatatests', (ifiledatatests,), d)
1026 return type(r'ifiledatatests', (ifiledatatests,), d)
1054
1027
1055 def makeifilemutationtests(makefilefn, maketransactionfn):
1028 def makeifilemutationtests(makefilefn, maketransactionfn):
1056 d = {
1029 d = {
1057 r'_makefilefn': makefilefn,
1030 r'_makefilefn': makefilefn,
1058 r'_maketransactionfn': maketransactionfn,
1031 r'_maketransactionfn': maketransactionfn,
1059 }
1032 }
1060 return type(r'ifilemutationtests', (ifilemutationtests,), d)
1033 return type(r'ifilemutationtests', (ifilemutationtests,), d)
@@ -1,674 +1,664
1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 # To use this with the test suite:
8 # To use this with the test suite:
9 #
9 #
10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
12
12
13 from __future__ import absolute_import
13 from __future__ import absolute_import
14
14
15 import stat
15 import stat
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18 from mercurial.node import (
18 from mercurial.node import (
19 bin,
19 bin,
20 hex,
20 hex,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 )
23 )
24 from mercurial.thirdparty import (
24 from mercurial.thirdparty import (
25 attr,
25 attr,
26 cbor,
26 cbor,
27 )
27 )
28 from mercurial import (
28 from mercurial import (
29 ancestor,
29 ancestor,
30 bundlerepo,
30 bundlerepo,
31 error,
31 error,
32 extensions,
32 extensions,
33 localrepo,
33 localrepo,
34 mdiff,
34 mdiff,
35 pycompat,
35 pycompat,
36 repository,
36 repository,
37 revlog,
37 revlog,
38 store,
38 store,
39 verify,
39 verify,
40 )
40 )
41 from mercurial.utils import (
41 from mercurial.utils import (
42 interfaceutil,
42 interfaceutil,
43 storageutil,
43 storageutil,
44 )
44 )
45
45
46 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
46 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
47 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
47 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
48 # be specifying the version(s) of Mercurial they are tested with, or
48 # be specifying the version(s) of Mercurial they are tested with, or
49 # leave the attribute unspecified.
49 # leave the attribute unspecified.
50 testedwith = 'ships-with-hg-core'
50 testedwith = 'ships-with-hg-core'
51
51
52 REQUIREMENT = 'testonly-simplestore'
52 REQUIREMENT = 'testonly-simplestore'
53
53
def validatenode(node):
    """Raise ``ValueError`` unless ``node`` is a 20 byte binary node.

    The int check comes first to catch accidental revision-number-for-node
    mixups before ``len()`` is attempted.
    """
    if isinstance(node, int):
        raise ValueError('expected node; got int')

    if len(node) != 20:
        raise ValueError('expected 20 byte node')
60
60
def validaterev(rev):
    """Raise ``ValueError`` unless ``rev`` is an integer revision number."""
    if not isinstance(rev, int):
        raise ValueError('expected int')
64
64
class simplestoreerror(error.StorageError):
    """Storage error raised by the simple store implementation."""
67
67
@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True, frozen=True)
class simplestorerevisiondelta(object):
    """Immutable value object implementing ``irevisiondelta``.

    Carries either a full ``revision`` or a ``delta`` against ``basenode``.
    """
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    linknode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
80
80
@interfaceutil.implementer(repository.ifilestorage)
class filestorage(object):
    """Implements storage for a tracked path.

    Data is stored in the VFS in a directory corresponding to the tracked
    path.

    Index data is stored in an ``index`` file using CBOR.

    Fulltext data is stored in files having names of the node.
    """

    def __init__(self, svfs, path):
        self._svfs = svfs
        self._path = path

        self._storepath = b'/'.join([b'data', path])
        self._indexpath = b'/'.join([self._storepath, b'index'])

        indexdata = self._svfs.tryread(self._indexpath)
        if indexdata:
            indexdata = cbor.loads(indexdata)

        self._indexdata = indexdata or []
        self._indexbynode = {}
        self._indexbyrev = {}
        self._index = []
        self._refreshindex()

    def _refreshindex(self):
        """Rebuild the in-memory lookup structures from ``self._indexdata``."""
        self._indexbynode.clear()
        self._indexbyrev.clear()
        self._index = []

        for i, entry in enumerate(self._indexdata):
            self._indexbynode[entry[b'node']] = entry
            self._indexbyrev[i] = entry

        # Synthesize entries for the null revision so lookups of nullid and
        # nullrev behave the same as they do for revlogs.
        self._indexbynode[nullid] = {
            b'node': nullid,
            b'p1': nullid,
            b'p2': nullid,
            b'linkrev': nullrev,
            b'flags': 0,
        }

        self._indexbyrev[nullrev] = {
            b'node': nullid,
            b'p1': nullid,
            b'p2': nullid,
            b'linkrev': nullrev,
            b'flags': 0,
        }

        for i, entry in enumerate(self._indexdata):
            p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))

            # start, length, rawsize, chainbase, linkrev, p1, p2, node
            self._index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
                                entry[b'node']))

        self._index.append((0, 0, 0, -1, -1, -1, -1, nullid))

    def __len__(self):
        return len(self._indexdata)

    def __iter__(self):
        return iter(range(len(self)))

    def revs(self, start=0, stop=None):
        """Iterate revision numbers from ``start`` to ``stop`` inclusive.

        Iterates backwards when ``start > stop``.
        """
        step = 1
        if stop is not None:
            if start > stop:
                step = -1

            stop += step
        else:
            stop = len(self)

        return range(start, stop, step)

    def parents(self, node):
        """Return the (p1, p2) parent nodes of ``node``."""
        validatenode(node)

        if node not in self._indexbynode:
            raise KeyError('unknown node')

        entry = self._indexbynode[node]

        return entry[b'p1'], entry[b'p2']

    def parentrevs(self, rev):
        p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
        return self.rev(p1), self.rev(p2)

    def rev(self, node):
        """Return the revision number for ``node`` (linear scan)."""
        validatenode(node)

        try:
            self._indexbynode[node]
        except KeyError:
            raise error.LookupError(node, self._indexpath, _('no node'))

        for rev, entry in self._indexbyrev.items():
            if entry[b'node'] == node:
                return rev

        raise error.ProgrammingError('this should not occur')

    def node(self, rev):
        validaterev(rev)

        return self._indexbyrev[rev][b'node']

    def lookup(self, node):
        """Resolve an int rev, binary node, rev string, or hex node to a node."""
        if isinstance(node, int):
            return self.node(node)

        if len(node) == 20:
            self.rev(node)
            return node

        try:
            rev = int(node)
            if '%d' % rev != node:
                raise ValueError

            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError

            return self.node(rev)
        except (ValueError, OverflowError):
            pass

        if len(node) == 40:
            try:
                rawnode = bin(node)
                self.rev(rawnode)
                return rawnode
            except TypeError:
                pass

        raise error.LookupError(node, self._path, _('invalid lookup input'))

    def linkrev(self, rev):
        validaterev(rev)

        return self._indexbyrev[rev][b'linkrev']

    def _flags(self, rev):
        validaterev(rev)

        return self._indexbyrev[rev][b'flags']

    def _candelta(self, baserev, rev):
        """Return False when either revision's rawtext changes under flags."""
        validaterev(baserev)
        validaterev(rev)

        if ((self._flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
            or (self._flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
            return False

        return True

    def _processflags(self, text, flags, operation, raw=False):
        """Apply registered flag processors; return (text, validatehash)."""
        if flags == 0:
            return text, True

        if flags & ~revlog.REVIDX_KNOWN_FLAGS:
            raise simplestoreerror(_("incompatible revision flag '%#x'") %
                                   (flags & ~revlog.REVIDX_KNOWN_FLAGS))

        validatehash = True
        # Depending on the operation (read or write), the order might be
        # reversed due to non-commutative transforms.
        orderedflags = revlog.REVIDX_FLAGS_ORDER
        if operation == 'write':
            orderedflags = reversed(orderedflags)

        for flag in orderedflags:
            # If a flagprocessor has been registered for a known flag, apply
            # the related operation transform and update result tuple.
            if flag & flags:
                vhash = True

                if flag not in revlog._flagprocessors:
                    message = _("missing processor for flag '%#x'") % (flag)
                    raise simplestoreerror(message)

                processor = revlog._flagprocessors[flag]
                if processor is not None:
                    readtransform, writetransform, rawtransform = processor

                    if raw:
                        vhash = rawtransform(self, text)
                    elif operation == 'read':
                        text, vhash = readtransform(self, text)
                    else: # write operation
                        text, vhash = writetransform(self, text)
                validatehash = validatehash and vhash

        return text, validatehash

    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Verify ``text`` hashes to ``node`` given its parents."""
        if p1 is None and p2 is None:
            p1, p2 = self.parents(node)
        if node != storageutil.hashrevisionsha1(text, p1, p2):
            raise simplestoreerror(_("integrity check failed on %s") %
                                   self._path)

    def revision(self, node, raw=False):
        """Return the (flag-processed) fulltext of ``node``."""
        validatenode(node)

        if node == nullid:
            return b''

        rev = self.rev(node)
        flags = self._flags(rev)

        path = b'/'.join([self._storepath, hex(node)])
        rawtext = self._svfs.read(path)

        text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)

        return text

    def read(self, node):
        """Return the fulltext of ``node`` with any copy metadata stripped."""
        validatenode(node)

        revision = self.revision(node)

        if not revision.startswith(b'\1\n'):
            return revision

        start = revision.index(b'\1\n', 2)
        return revision[start + 2:]

    def renamed(self, node):
        """Return (copysource, copynode) if ``node`` is a copy, else False."""
        validatenode(node)

        if self.parents(node)[0] != nullid:
            return False

        fulltext = self.revision(node)
        m = storageutil.parsemeta(fulltext)[0]

        if m and 'copy' in m:
            return m['copy'], bin(m['copyrev'])

        return False

    def cmp(self, node, text):
        """Return True if ``text`` differs from the stored revision."""
        validatenode(node)

        t = text

        if text.startswith(b'\1\n'):
            t = b'\1\n\1\n' + text

        p1, p2 = self.parents(node)

        if storageutil.hashrevisionsha1(t, p1, p2) == node:
            return False

        if self.iscensored(self.rev(node)):
            return text != b''

        if self.renamed(node):
            t2 = self.read(node)
            return t2 != text

        return True

    def size(self, rev):
        """Return the length of the revision's user-visible data."""
        validaterev(rev)

        node = self._indexbyrev[rev][b'node']

        if self.renamed(node):
            return len(self.read(node))

        if self.iscensored(rev):
            return 0

        return len(self.revision(node))

    def iscensored(self, rev):
        validaterev(rev)

        return self._flags(rev) & revlog.REVIDX_ISCENSORED

    def commonancestorsheads(self, a, b):
        """Return the heads of the common ancestors of nodes ``a`` and ``b``."""
        validatenode(a)
        validatenode(b)

        a = self.rev(a)
        b = self.rev(b)

        ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
        return pycompat.maplist(self.node, ancestors)

    def descendants(self, revs):
        # This is a copy of revlog.descendants()
        first = min(revs)
        if first == nullrev:
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in self.revs(start=first + 1):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

    # Required by verify.
    def files(self):
        entries = self._svfs.listdir(self._storepath)

        # Strip out undo.backup.* files created as part of transaction
        # recording.
        entries = [f for f in entries if not f.startswith('undo.backup.')]

        return [b'/'.join((self._storepath, f)) for f in entries]

    def add(self, text, meta, transaction, linkrev, p1, p2):
        """Add a revision, packing copy ``meta`` into the stored text."""
        if meta or text.startswith(b'\1\n'):
            text = storageutil.packmeta(meta, text)

        return self.addrevision(text, transaction, linkrev, p1, p2)

    def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
                    flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
        validatenode(p1)
        validatenode(p2)

        if flags:
            node = node or storageutil.hashrevisionsha1(text, p1, p2)

        rawtext, validatehash = self._processflags(text, flags, 'write')

        node = node or storageutil.hashrevisionsha1(text, p1, p2)

        # Already stored; adding is idempotent.
        if node in self._indexbynode:
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
                                    flags)

    def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
        """Write ``rawtext`` and record the new index entry."""
        transaction.addbackup(self._indexpath)

        path = b'/'.join([self._storepath, hex(node)])

        self._svfs.write(path, rawtext)

        self._indexdata.append({
            b'node': node,
            b'p1': p1,
            b'p2': p2,
            b'linkrev': link,
            b'flags': flags,
        })

        self._reflectindexupdate()

        return node

    def _reflectindexupdate(self):
        """Refresh in-memory state and persist the CBOR index."""
        self._refreshindex()
        self._svfs.write(self._indexpath, cbor.dumps(self._indexdata))

    def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
        """Apply a group of incoming deltas; return the nodes seen."""
        nodes = []

        transaction.addbackup(self._indexpath)

        for node, p1, p2, linknode, deltabase, delta, flags in deltas:
            linkrev = linkmapper(linknode)
            flags = flags or revlog.REVIDX_DEFAULT_FLAGS

            nodes.append(node)

            if node in self._indexbynode:
                continue

            # Need to resolve the fulltext from the delta base.
            if deltabase == nullid:
                text = mdiff.patch(b'', delta)
            else:
                text = mdiff.patch(self.revision(deltabase), delta)

            self._addrawrevision(node, text, transaction, linkrev, p1, p2,
                                 flags)

            if addrevisioncb:
                addrevisioncb(self, node)

        return nodes

    def heads(self, start=None, stop=None):
        # This is copied from revlog.py.
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = {startrev}
        heads = {startrev}

        parentrevs = self.parentrevs
        for r in self.revs(start=startrev + 1):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                if p in heads and p not in stoprevs:
                    heads.remove(p)

        return [self.node(r) for r in heads]

    def children(self, node):
        validatenode(node)

        # This is a copy of revlog.children().
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def getstrippoint(self, minlink):

        # This is largely a copy of revlog.getstrippoint().
        brokenrevs = set()
        strippoint = len(self)

        heads = {}
        futurelargelinkrevs = set()
        for head in self.heads():
            headlinkrev = self.linkrev(self.rev(head))
            heads[head] = headlinkrev
            if headlinkrev >= minlink:
                futurelargelinkrevs.add(headlinkrev)

        # This algorithm involves walking down the rev graph, starting at the
        # heads. Since the revs are topologically sorted according to linkrev,
        # once all head linkrevs are below the minlink, we know there are
        # no more revs that could have a linkrev greater than minlink.
        # So we can stop walking.
        while futurelargelinkrevs:
            strippoint -= 1
            linkrev = heads.pop(strippoint)

            if linkrev < minlink:
                brokenrevs.add(strippoint)
            else:
                futurelargelinkrevs.remove(linkrev)

            for p in self.parentrevs(strippoint):
                if p != nullrev:
                    plinkrev = self.linkrev(p)
                    heads[p] = plinkrev
                    if plinkrev >= minlink:
                        futurelargelinkrevs.add(plinkrev)

        return strippoint, brokenrevs

    def strip(self, minlink, transaction):
        """Remove revisions whose linkrev is >= ``minlink``."""
        if not len(self):
            return

        rev, _ignored = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # Purge index data starting at the requested revision.
        self._indexdata[rev:] = []
        self._reflectindexupdate()
592
582
def issimplestorefile(f, kind, st):
    """Walk filter: True for regular, non-revlog, non-undo store files."""
    # Only regular files can belong to the simple store.
    if kind != stat.S_IFREG:
        return False

    # Revlogs are handled by the regular store machinery.
    if store.isrevlog(f, kind, st):
        return False

    # Ignore transaction undo files.
    if f.startswith('undo.'):
        return False

    # Otherwise assume it belongs to the simple store.
    return True
606
596
class simplestore(store.encodedstore):
    """encodedstore variant that also reports non-revlog simple store files."""

    def datafiles(self):
        for item in super(simplestore, self).datafiles():
            yield item

        # Supplement with non-revlog files.
        extrafiles = self._walk('data', True, filefilter=issimplestorefile)

        for unencoded, encoded, size in extrafiles:
            try:
                unencoded = store.decodefilename(unencoded)
            except KeyError:
                # Name doesn't decode; report it as unknown.
                unencoded = None

            yield unencoded, encoded, size
622
612
def reposetup(ui, repo):
    """Swap the repository's file() storage for the simple store class."""
    if not repo.local():
        return

    if isinstance(repo, bundlerepo.bundlerepository):
        raise error.Abort(_('cannot use simple store with bundlerepo'))

    class simplestorerepo(repo.__class__):
        def file(self, f):
            return filestorage(self.svfs, f)

    repo.__class__ = simplestorerepo
635
625
def featuresetup(ui, supported):
    """Advertise the simple store requirement as supported by this repo."""
    supported.add(REQUIREMENT)
638
628
def newreporequirements(orig, ui):
    """Modifies default requirements for new repos to use the simple store."""
    requirements = orig(ui)

    # These requirements are only used to affect creation of the store
    # object. We have our own store. So we can remove them.
    # TODO do this once we feel like taking the test hit.
    #if 'fncache' in requirements:
    #    requirements.remove('fncache')
    #if 'dotencode' in requirements:
    #    requirements.remove('dotencode')

    requirements.add(REQUIREMENT)

    return requirements
654
644
def makestore(orig, requirements, path, vfstype):
    """Construct the store, substituting ``simplestore`` when required."""
    if REQUIREMENT in requirements:
        return simplestore(path, vfstype)

    # Requirement absent: fall through to the stock store factory.
    return orig(requirements, path, vfstype)
660
650
def verifierinit(orig, self, *args, **kwargs):
    """Wrap ``verify.verifier.__init__`` to silence orphan-file warnings."""
    orig(self, *args, **kwargs)

    # We don't care that files in the store don't align with what is
    # advertised. So suppress these warnings.
    self.warnorphanstorefiles = False
667
657
def extsetup(ui):
    """Register the hooks and wrappers that activate the simple store."""
    localrepo.featuresetupfuncs.add(featuresetup)

    extensions.wrapfunction(
        localrepo, 'newreporequirements', newreporequirements)
    extensions.wrapfunction(store, 'store', makestore)
    extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
General Comments 0
You need to be logged in to leave comments. Login now