filelog: drop index attribute (API)...
Gregory Szorc
r39896:d9b3cc3d default
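A hedged sketch of what dropping the attribute means for callers of filelog
(``repo`` and the file path are hypothetical; the attribute and the remaining
methods are the ones shown in the diff below):

    fl = repo.file('path/to/f')

    # Previously, filelog forwarded its revlog's index wholesale:
    #     entry = fl.index[0]    # an ifilerevisionssequence entry
    # This revision drops that attribute from both filelog and the ifileindex
    # interface, so consumers stick to the remaining interface methods
    # (callers that really need the revlog index, e.g. repo upgrade, presumably
    # go through the underlying revlog instead).
    node = fl.node(0)
    rev = fl.rev(node)
    p1, p2 = fl.parents(node)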
mercurial/filelog.py
@@ -1,262 +1,260 @@
1 # filelog.py - file history class for mercurial
1 # filelog.py - file history class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from . import (
10 from . import (
11 error,
11 error,
12 repository,
12 repository,
13 revlog,
13 revlog,
14 )
14 )
15 from .utils import (
15 from .utils import (
16 interfaceutil,
16 interfaceutil,
17 )
17 )
18
18
19 @interfaceutil.implementer(repository.ifilestorage)
19 @interfaceutil.implementer(repository.ifilestorage)
20 class filelog(object):
20 class filelog(object):
21 def __init__(self, opener, path):
21 def __init__(self, opener, path):
22 self._revlog = revlog.revlog(opener,
22 self._revlog = revlog.revlog(opener,
23 '/'.join(('data', path + '.i')),
23 '/'.join(('data', path + '.i')),
24 censorable=True)
24 censorable=True)
25 # Full name of the user visible file, relative to the repository root.
25 # Full name of the user visible file, relative to the repository root.
26 # Used by LFS.
26 # Used by LFS.
27 self._revlog.filename = path
27 self._revlog.filename = path
28 -# Used by repo upgrade.
29 -self.index = self._revlog.index
30 # Used by changegroup generation.
28 # Used by changegroup generation.
31 self._generaldelta = self._revlog._generaldelta
29 self._generaldelta = self._revlog._generaldelta
32
30
33 def __len__(self):
31 def __len__(self):
34 return len(self._revlog)
32 return len(self._revlog)
35
33
36 def __iter__(self):
34 def __iter__(self):
37 return self._revlog.__iter__()
35 return self._revlog.__iter__()
38
36
39 def revs(self, start=0, stop=None):
37 def revs(self, start=0, stop=None):
40 return self._revlog.revs(start=start, stop=stop)
38 return self._revlog.revs(start=start, stop=stop)
41
39
42 def parents(self, node):
40 def parents(self, node):
43 return self._revlog.parents(node)
41 return self._revlog.parents(node)
44
42
45 def parentrevs(self, rev):
43 def parentrevs(self, rev):
46 return self._revlog.parentrevs(rev)
44 return self._revlog.parentrevs(rev)
47
45
48 def rev(self, node):
46 def rev(self, node):
49 return self._revlog.rev(node)
47 return self._revlog.rev(node)
50
48
51 def node(self, rev):
49 def node(self, rev):
52 return self._revlog.node(rev)
50 return self._revlog.node(rev)
53
51
54 def lookup(self, node):
52 def lookup(self, node):
55 return self._revlog.lookup(node)
53 return self._revlog.lookup(node)
56
54
57 def linkrev(self, rev):
55 def linkrev(self, rev):
58 return self._revlog.linkrev(rev)
56 return self._revlog.linkrev(rev)
59
57
60 # Used by verify.
58 # Used by verify.
61 def flags(self, rev):
59 def flags(self, rev):
62 return self._revlog.flags(rev)
60 return self._revlog.flags(rev)
63
61
64 def commonancestorsheads(self, node1, node2):
62 def commonancestorsheads(self, node1, node2):
65 return self._revlog.commonancestorsheads(node1, node2)
63 return self._revlog.commonancestorsheads(node1, node2)
66
64
67 # Used by dagop.blockdescendants().
65 # Used by dagop.blockdescendants().
68 def descendants(self, revs):
66 def descendants(self, revs):
69 return self._revlog.descendants(revs)
67 return self._revlog.descendants(revs)
70
68
71 def heads(self, start=None, stop=None):
69 def heads(self, start=None, stop=None):
72 return self._revlog.heads(start, stop)
70 return self._revlog.heads(start, stop)
73
71
74 # Used by hgweb, children extension.
72 # Used by hgweb, children extension.
75 def children(self, node):
73 def children(self, node):
76 return self._revlog.children(node)
74 return self._revlog.children(node)
77
75
78 def deltaparent(self, rev):
76 def deltaparent(self, rev):
79 return self._revlog.deltaparent(rev)
77 return self._revlog.deltaparent(rev)
80
78
81 def iscensored(self, rev):
79 def iscensored(self, rev):
82 return self._revlog.iscensored(rev)
80 return self._revlog.iscensored(rev)
83
81
84 # Used by repo upgrade, verify.
82 # Used by repo upgrade, verify.
85 def rawsize(self, rev):
83 def rawsize(self, rev):
86 return self._revlog.rawsize(rev)
84 return self._revlog.rawsize(rev)
87
85
88 # Might be unused.
86 # Might be unused.
89 def checkhash(self, text, node, p1=None, p2=None, rev=None):
87 def checkhash(self, text, node, p1=None, p2=None, rev=None):
90 return self._revlog.checkhash(text, node, p1=p1, p2=p2, rev=rev)
88 return self._revlog.checkhash(text, node, p1=p1, p2=p2, rev=rev)
91
89
92 def revision(self, node, _df=None, raw=False):
90 def revision(self, node, _df=None, raw=False):
93 return self._revlog.revision(node, _df=_df, raw=raw)
91 return self._revlog.revision(node, _df=_df, raw=raw)
94
92
95 def revdiff(self, rev1, rev2):
93 def revdiff(self, rev1, rev2):
96 return self._revlog.revdiff(rev1, rev2)
94 return self._revlog.revdiff(rev1, rev2)
97
95
98 def emitrevisiondeltas(self, requests):
96 def emitrevisiondeltas(self, requests):
99 return self._revlog.emitrevisiondeltas(requests)
97 return self._revlog.emitrevisiondeltas(requests)
100
98
101 def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
99 def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
102 node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
100 node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
103 cachedelta=None):
101 cachedelta=None):
104 return self._revlog.addrevision(revisiondata, transaction, linkrev,
102 return self._revlog.addrevision(revisiondata, transaction, linkrev,
105 p1, p2, node=node, flags=flags,
103 p1, p2, node=node, flags=flags,
106 cachedelta=cachedelta)
104 cachedelta=cachedelta)
107
105
108 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
106 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
109 return self._revlog.addgroup(deltas, linkmapper, transaction,
107 return self._revlog.addgroup(deltas, linkmapper, transaction,
110 addrevisioncb=addrevisioncb)
108 addrevisioncb=addrevisioncb)
111
109
112 def getstrippoint(self, minlink):
110 def getstrippoint(self, minlink):
113 return self._revlog.getstrippoint(minlink)
111 return self._revlog.getstrippoint(minlink)
114
112
115 def strip(self, minlink, transaction):
113 def strip(self, minlink, transaction):
116 return self._revlog.strip(minlink, transaction)
114 return self._revlog.strip(minlink, transaction)
117
115
118 def censorrevision(self, tr, node, tombstone=b''):
116 def censorrevision(self, tr, node, tombstone=b''):
119 return self._revlog.censorrevision(node, tombstone=tombstone)
117 return self._revlog.censorrevision(node, tombstone=tombstone)
120
118
121 def files(self):
119 def files(self):
122 return self._revlog.files()
120 return self._revlog.files()
123
121
124 def read(self, node):
122 def read(self, node):
125 t = self.revision(node)
123 t = self.revision(node)
126 if not t.startswith('\1\n'):
124 if not t.startswith('\1\n'):
127 return t
125 return t
128 s = t.index('\1\n', 2)
126 s = t.index('\1\n', 2)
129 return t[s + 2:]
127 return t[s + 2:]
130
128
131 def add(self, text, meta, transaction, link, p1=None, p2=None):
129 def add(self, text, meta, transaction, link, p1=None, p2=None):
132 if meta or text.startswith('\1\n'):
130 if meta or text.startswith('\1\n'):
133 text = revlog.packmeta(meta, text)
131 text = revlog.packmeta(meta, text)
134 return self.addrevision(text, transaction, link, p1, p2)
132 return self.addrevision(text, transaction, link, p1, p2)
135
133
136 def renamed(self, node):
134 def renamed(self, node):
137 if self.parents(node)[0] != revlog.nullid:
135 if self.parents(node)[0] != revlog.nullid:
138 return False
136 return False
139 t = self.revision(node)
137 t = self.revision(node)
140 m = revlog.parsemeta(t)[0]
138 m = revlog.parsemeta(t)[0]
141 # copy and copyrev occur in pairs. In rare cases due to bugs,
139 # copy and copyrev occur in pairs. In rare cases due to bugs,
142 # one can occur without the other.
140 # one can occur without the other.
143 if m and "copy" in m and "copyrev" in m:
141 if m and "copy" in m and "copyrev" in m:
144 return (m["copy"], revlog.bin(m["copyrev"]))
142 return (m["copy"], revlog.bin(m["copyrev"]))
145 return False
143 return False
146
144
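# The read(), add() and renamed() methods above parse and emit the copy-metadata
# header produced by revlog.packmeta()/parsemeta(). A hedged sketch of that
# layout (the key names are the ones used above; exact framing per packmeta):
#
#     \1\n
#     copy: source/path\n
#     copyrev: <40 hex characters of the source node>\n
#     \1\n
#     <file data>
#
# When the file data itself starts with "\1\n" and there is no metadata, an
# empty "\1\n\1\n" header is prepended instead, which is why cmp() below
# compares against '\1\n\1\n' + text.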
147 def size(self, rev):
145 def size(self, rev):
148 """return the size of a given revision"""
146 """return the size of a given revision"""
149
147
150 # for revisions with renames, we have to go the slow way
148 # for revisions with renames, we have to go the slow way
151 node = self.node(rev)
149 node = self.node(rev)
152 if self.renamed(node):
150 if self.renamed(node):
153 return len(self.read(node))
151 return len(self.read(node))
154 if self.iscensored(rev):
152 if self.iscensored(rev):
155 return 0
153 return 0
156
154
157 # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
155 # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
158 return self._revlog.size(rev)
156 return self._revlog.size(rev)
159
157
160 def cmp(self, node, text):
158 def cmp(self, node, text):
161 """compare text with a given file revision
159 """compare text with a given file revision
162
160
163 returns True if text is different than what is stored.
161 returns True if text is different than what is stored.
164 """
162 """
165
163
166 t = text
164 t = text
167 if text.startswith('\1\n'):
165 if text.startswith('\1\n'):
168 t = '\1\n\1\n' + text
166 t = '\1\n\1\n' + text
169
167
170 samehashes = not self._revlog.cmp(node, t)
168 samehashes = not self._revlog.cmp(node, t)
171 if samehashes:
169 if samehashes:
172 return False
170 return False
173
171
174 # censored files compare against the empty file
172 # censored files compare against the empty file
175 if self.iscensored(self.rev(node)):
173 if self.iscensored(self.rev(node)):
176 return text != ''
174 return text != ''
177
175
178 # renaming a file produces a different hash, even if the data
176 # renaming a file produces a different hash, even if the data
179 # remains unchanged. Check if it's the case (slow):
177 # remains unchanged. Check if it's the case (slow):
180 if self.renamed(node):
178 if self.renamed(node):
181 t2 = self.read(node)
179 t2 = self.read(node)
182 return t2 != text
180 return t2 != text
183
181
184 return True
182 return True
185
183
186 def verifyintegrity(self, state):
184 def verifyintegrity(self, state):
187 return self._revlog.verifyintegrity(state)
185 return self._revlog.verifyintegrity(state)
188
186
189 # TODO these aren't part of the interface and aren't internal methods.
187 # TODO these aren't part of the interface and aren't internal methods.
190 # Callers should be fixed to not use them.
188 # Callers should be fixed to not use them.
191
189
192 # Used by bundlefilelog, unionfilelog.
190 # Used by bundlefilelog, unionfilelog.
193 @property
191 @property
194 def indexfile(self):
192 def indexfile(self):
195 return self._revlog.indexfile
193 return self._revlog.indexfile
196
194
197 @indexfile.setter
195 @indexfile.setter
198 def indexfile(self, value):
196 def indexfile(self, value):
199 self._revlog.indexfile = value
197 self._revlog.indexfile = value
200
198
201 # Used by repo upgrade.
199 # Used by repo upgrade.
202 @property
200 @property
203 def opener(self):
201 def opener(self):
204 return self._revlog.opener
202 return self._revlog.opener
205
203
206 # Used by repo upgrade.
204 # Used by repo upgrade.
207 def clone(self, tr, destrevlog, **kwargs):
205 def clone(self, tr, destrevlog, **kwargs):
208 if not isinstance(destrevlog, filelog):
206 if not isinstance(destrevlog, filelog):
209 raise error.ProgrammingError('expected filelog to clone()')
207 raise error.ProgrammingError('expected filelog to clone()')
210
208
211 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
209 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
212
210
213 class narrowfilelog(filelog):
211 class narrowfilelog(filelog):
214 """Filelog variation to be used with narrow stores."""
212 """Filelog variation to be used with narrow stores."""
215
213
216 def __init__(self, opener, path, narrowmatch):
214 def __init__(self, opener, path, narrowmatch):
217 super(narrowfilelog, self).__init__(opener, path)
215 super(narrowfilelog, self).__init__(opener, path)
218 self._narrowmatch = narrowmatch
216 self._narrowmatch = narrowmatch
219
217
220 def renamed(self, node):
218 def renamed(self, node):
221 res = super(narrowfilelog, self).renamed(node)
219 res = super(narrowfilelog, self).renamed(node)
222
220
223 # Renames that come from outside the narrowspec are problematic
221 # Renames that come from outside the narrowspec are problematic
224 # because we may lack the base text for the rename. This can result
222 # because we may lack the base text for the rename. This can result
225 # in code attempting to walk the ancestry or compute a diff
223 # in code attempting to walk the ancestry or compute a diff
226 # encountering a missing revision. We address this by silently
224 # encountering a missing revision. We address this by silently
227 # removing rename metadata if the source file is outside the
225 # removing rename metadata if the source file is outside the
228 # narrow spec.
226 # narrow spec.
229 #
227 #
230 # A better solution would be to see if the base revision is available,
228 # A better solution would be to see if the base revision is available,
231 # rather than assuming it isn't.
229 # rather than assuming it isn't.
232 #
230 #
233 # An even better solution would be to teach all consumers of rename
231 # An even better solution would be to teach all consumers of rename
234 # metadata that the base revision may not be available.
232 # metadata that the base revision may not be available.
235 #
233 #
236 # TODO consider better ways of doing this.
234 # TODO consider better ways of doing this.
237 if res and not self._narrowmatch(res[0]):
235 if res and not self._narrowmatch(res[0]):
238 return None
236 return None
239
237
240 return res
238 return res
241
239
242 def size(self, rev):
240 def size(self, rev):
243 # Because we have a custom renamed() that may lie, we need to call
241 # Because we have a custom renamed() that may lie, we need to call
244 # the base renamed() to report accurate results.
242 # the base renamed() to report accurate results.
245 node = self.node(rev)
243 node = self.node(rev)
246 if super(narrowfilelog, self).renamed(node):
244 if super(narrowfilelog, self).renamed(node):
247 return len(self.read(node))
245 return len(self.read(node))
248 else:
246 else:
249 return super(narrowfilelog, self).size(rev)
247 return super(narrowfilelog, self).size(rev)
250
248
251 def cmp(self, node, text):
249 def cmp(self, node, text):
252 different = super(narrowfilelog, self).cmp(node, text)
250 different = super(narrowfilelog, self).cmp(node, text)
253
251
254 # Because renamed() may lie, we may get false positives for
252 # Because renamed() may lie, we may get false positives for
255 # different content. Check for this by comparing against the original
253 # different content. Check for this by comparing against the original
256 # renamed() implementation.
254 # renamed() implementation.
257 if different:
255 if different:
258 if super(narrowfilelog, self).renamed(node):
256 if super(narrowfilelog, self).renamed(node):
259 t2 = self.read(node)
257 t2 = self.read(node)
260 return t2 != text
258 return t2 != text
261
259
262 return different
260 return different
mercurial/repository.py
@@ -1,1642 +1,1639 @@
1 # repository.py - Interfaces and base classes for repositories and peers.
1 # repository.py - Interfaces and base classes for repositories and peers.
2 #
2 #
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from . import (
11 from . import (
12 error,
12 error,
13 )
13 )
14 from .utils import (
14 from .utils import (
15 interfaceutil,
15 interfaceutil,
16 )
16 )
17
17
18 # When narrowing is finalized and no longer subject to format changes,
18 # When narrowing is finalized and no longer subject to format changes,
19 # we should move this to just "narrow" or similar.
19 # we should move this to just "narrow" or similar.
20 NARROW_REQUIREMENT = 'narrowhg-experimental'
20 NARROW_REQUIREMENT = 'narrowhg-experimental'
21
21
22 # Local repository feature string.
22 # Local repository feature string.
23
23
24 # Revlogs are being used for file storage.
24 # Revlogs are being used for file storage.
25 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
25 REPO_FEATURE_REVLOG_FILE_STORAGE = b'revlogfilestorage'
26 # The storage part of the repository is shared from an external source.
26 # The storage part of the repository is shared from an external source.
27 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
27 REPO_FEATURE_SHARED_STORAGE = b'sharedstore'
28 # LFS supported for backing file storage.
28 # LFS supported for backing file storage.
29 REPO_FEATURE_LFS = b'lfs'
29 REPO_FEATURE_LFS = b'lfs'
30
30
31 class ipeerconnection(interfaceutil.Interface):
31 class ipeerconnection(interfaceutil.Interface):
32 """Represents a "connection" to a repository.
32 """Represents a "connection" to a repository.
33
33
34 This is the base interface for representing a connection to a repository.
34 This is the base interface for representing a connection to a repository.
35 It holds basic properties and methods applicable to all peer types.
35 It holds basic properties and methods applicable to all peer types.
36
36
37 This is not a complete interface definition and should not be used
37 This is not a complete interface definition and should not be used
38 outside of this module.
38 outside of this module.
39 """
39 """
40 ui = interfaceutil.Attribute("""ui.ui instance""")
40 ui = interfaceutil.Attribute("""ui.ui instance""")
41
41
42 def url():
42 def url():
43 """Returns a URL string representing this peer.
43 """Returns a URL string representing this peer.
44
44
45 Currently, implementations expose the raw URL used to construct the
45 Currently, implementations expose the raw URL used to construct the
46 instance. It may contain credentials as part of the URL. The
46 instance. It may contain credentials as part of the URL. The
47 expectations of the value aren't well-defined and this could lead to
47 expectations of the value aren't well-defined and this could lead to
48 data leakage.
48 data leakage.
49
49
50 TODO audit/clean consumers and more clearly define the contents of this
50 TODO audit/clean consumers and more clearly define the contents of this
51 value.
51 value.
52 """
52 """
53
53
54 def local():
54 def local():
55 """Returns a local repository instance.
55 """Returns a local repository instance.
56
56
57 If the peer represents a local repository, returns an object that
57 If the peer represents a local repository, returns an object that
58 can be used to interface with it. Otherwise returns ``None``.
58 can be used to interface with it. Otherwise returns ``None``.
59 """
59 """
60
60
61 def peer():
61 def peer():
62 """Returns an object conforming to this interface.
62 """Returns an object conforming to this interface.
63
63
64 Most implementations will ``return self``.
64 Most implementations will ``return self``.
65 """
65 """
66
66
67 def canpush():
67 def canpush():
68 """Returns a boolean indicating if this peer can be pushed to."""
68 """Returns a boolean indicating if this peer can be pushed to."""
69
69
70 def close():
70 def close():
71 """Close the connection to this peer.
71 """Close the connection to this peer.
72
72
73 This is called when the peer will no longer be used. Resources
73 This is called when the peer will no longer be used. Resources
74 associated with the peer should be cleaned up.
74 associated with the peer should be cleaned up.
75 """
75 """
76
76
77 class ipeercapabilities(interfaceutil.Interface):
77 class ipeercapabilities(interfaceutil.Interface):
78 """Peer sub-interface related to capabilities."""
78 """Peer sub-interface related to capabilities."""
79
79
80 def capable(name):
80 def capable(name):
81 """Determine support for a named capability.
81 """Determine support for a named capability.
82
82
83 Returns ``False`` if capability not supported.
83 Returns ``False`` if capability not supported.
84
84
85 Returns ``True`` if boolean capability is supported. Returns a string
85 Returns ``True`` if boolean capability is supported. Returns a string
86 if capability support is non-boolean.
86 if capability support is non-boolean.
87
87
88 Capability strings may or may not map to wire protocol capabilities.
88 Capability strings may or may not map to wire protocol capabilities.
89 """
89 """
90
90
91 def requirecap(name, purpose):
91 def requirecap(name, purpose):
92 """Require a capability to be present.
92 """Require a capability to be present.
93
93
94 Raises a ``CapabilityError`` if the capability isn't present.
94 Raises a ``CapabilityError`` if the capability isn't present.
95 """
95 """
96
96
97 class ipeercommands(interfaceutil.Interface):
97 class ipeercommands(interfaceutil.Interface):
98 """Client-side interface for communicating over the wire protocol.
98 """Client-side interface for communicating over the wire protocol.
99
99
100 This interface is used as a gateway to the Mercurial wire protocol.
100 This interface is used as a gateway to the Mercurial wire protocol.
101 methods commonly call wire protocol commands of the same name.
101 methods commonly call wire protocol commands of the same name.
102 """
102 """
103
103
104 def branchmap():
104 def branchmap():
105 """Obtain heads in named branches.
105 """Obtain heads in named branches.
106
106
107 Returns a dict mapping branch name to an iterable of nodes that are
107 Returns a dict mapping branch name to an iterable of nodes that are
108 heads on that branch.
108 heads on that branch.
109 """
109 """
110
110
111 def capabilities():
111 def capabilities():
112 """Obtain capabilities of the peer.
112 """Obtain capabilities of the peer.
113
113
114 Returns a set of string capabilities.
114 Returns a set of string capabilities.
115 """
115 """
116
116
117 def clonebundles():
117 def clonebundles():
118 """Obtains the clone bundles manifest for the repo.
118 """Obtains the clone bundles manifest for the repo.
119
119
120 Returns the manifest as unparsed bytes.
120 Returns the manifest as unparsed bytes.
121 """
121 """
122
122
123 def debugwireargs(one, two, three=None, four=None, five=None):
123 def debugwireargs(one, two, three=None, four=None, five=None):
124 """Used to facilitate debugging of arguments passed over the wire."""
124 """Used to facilitate debugging of arguments passed over the wire."""
125
125
126 def getbundle(source, **kwargs):
126 def getbundle(source, **kwargs):
127 """Obtain remote repository data as a bundle.
127 """Obtain remote repository data as a bundle.
128
128
129 This command is how the bulk of repository data is transferred from
129 This command is how the bulk of repository data is transferred from
130 the peer to the local repository
130 the peer to the local repository
131
131
132 Returns a generator of bundle data.
132 Returns a generator of bundle data.
133 """
133 """
134
134
135 def heads():
135 def heads():
136 """Determine all known head revisions in the peer.
136 """Determine all known head revisions in the peer.
137
137
138 Returns an iterable of binary nodes.
138 Returns an iterable of binary nodes.
139 """
139 """
140
140
141 def known(nodes):
141 def known(nodes):
142 """Determine whether multiple nodes are known.
142 """Determine whether multiple nodes are known.
143
143
144 Accepts an iterable of nodes whose presence to check for.
144 Accepts an iterable of nodes whose presence to check for.
145
145
146 Returns an iterable of booleans indicating of the corresponding node
146 Returns an iterable of booleans indicating of the corresponding node
147 at that index is known to the peer.
147 at that index is known to the peer.
148 """
148 """
149
149
150 def listkeys(namespace):
150 def listkeys(namespace):
151 """Obtain all keys in a pushkey namespace.
151 """Obtain all keys in a pushkey namespace.
152
152
153 Returns an iterable of key names.
153 Returns an iterable of key names.
154 """
154 """
155
155
156 def lookup(key):
156 def lookup(key):
157 """Resolve a value to a known revision.
157 """Resolve a value to a known revision.
158
158
159 Returns a binary node of the resolved revision on success.
159 Returns a binary node of the resolved revision on success.
160 """
160 """
161
161
162 def pushkey(namespace, key, old, new):
162 def pushkey(namespace, key, old, new):
163 """Set a value using the ``pushkey`` protocol.
163 """Set a value using the ``pushkey`` protocol.
164
164
165 Arguments correspond to the pushkey namespace and key to operate on and
165 Arguments correspond to the pushkey namespace and key to operate on and
166 the old and new values for that key.
166 the old and new values for that key.
167
167
168 Returns a string with the peer result. The value inside varies by the
168 Returns a string with the peer result. The value inside varies by the
169 namespace.
169 namespace.
170 """
170 """
171
171
172 def stream_out():
172 def stream_out():
173 """Obtain streaming clone data.
173 """Obtain streaming clone data.
174
174
175 Successful result should be a generator of data chunks.
175 Successful result should be a generator of data chunks.
176 """
176 """
177
177
178 def unbundle(bundle, heads, url):
178 def unbundle(bundle, heads, url):
179 """Transfer repository data to the peer.
179 """Transfer repository data to the peer.
180
180
181 This is how the bulk of data during a push is transferred.
181 This is how the bulk of data during a push is transferred.
182
182
183 Returns the integer number of heads added to the peer.
183 Returns the integer number of heads added to the peer.
184 """
184 """
185
185
186 class ipeerlegacycommands(interfaceutil.Interface):
186 class ipeerlegacycommands(interfaceutil.Interface):
187 """Interface for implementing support for legacy wire protocol commands.
187 """Interface for implementing support for legacy wire protocol commands.
188
188
189 Wire protocol commands transition to legacy status when they are no longer
189 Wire protocol commands transition to legacy status when they are no longer
190 used by modern clients. To facilitate identifying which commands are
190 used by modern clients. To facilitate identifying which commands are
191 legacy, the interfaces are split.
191 legacy, the interfaces are split.
192 """
192 """
193
193
194 def between(pairs):
194 def between(pairs):
195 """Obtain nodes between pairs of nodes.
195 """Obtain nodes between pairs of nodes.
196
196
197 ``pairs`` is an iterable of node pairs.
197 ``pairs`` is an iterable of node pairs.
198
198
199 Returns an iterable of iterables of nodes corresponding to each
199 Returns an iterable of iterables of nodes corresponding to each
200 requested pair.
200 requested pair.
201 """
201 """
202
202
203 def branches(nodes):
203 def branches(nodes):
204 """Obtain ancestor changesets of specific nodes back to a branch point.
204 """Obtain ancestor changesets of specific nodes back to a branch point.
205
205
206 For each requested node, the peer finds the first ancestor node that is
206 For each requested node, the peer finds the first ancestor node that is
207 a DAG root or is a merge.
207 a DAG root or is a merge.
208
208
209 Returns an iterable of iterables with the resolved values for each node.
209 Returns an iterable of iterables with the resolved values for each node.
210 """
210 """
211
211
212 def changegroup(nodes, source):
212 def changegroup(nodes, source):
213 """Obtain a changegroup with data for descendants of specified nodes."""
213 """Obtain a changegroup with data for descendants of specified nodes."""
214
214
215 def changegroupsubset(bases, heads, source):
215 def changegroupsubset(bases, heads, source):
216 pass
216 pass
217
217
218 class ipeercommandexecutor(interfaceutil.Interface):
218 class ipeercommandexecutor(interfaceutil.Interface):
219 """Represents a mechanism to execute remote commands.
219 """Represents a mechanism to execute remote commands.
220
220
221 This is the primary interface for requesting that wire protocol commands
221 This is the primary interface for requesting that wire protocol commands
222 be executed. Instances of this interface are active in a context manager
222 be executed. Instances of this interface are active in a context manager
223 and have a well-defined lifetime. When the context manager exits, all
223 and have a well-defined lifetime. When the context manager exits, all
224 outstanding requests are waited on.
224 outstanding requests are waited on.
225 """
225 """
226
226
227 def callcommand(name, args):
227 def callcommand(name, args):
228 """Request that a named command be executed.
228 """Request that a named command be executed.
229
229
230 Receives the command name and a dictionary of command arguments.
230 Receives the command name and a dictionary of command arguments.
231
231
232 Returns a ``concurrent.futures.Future`` that will resolve to the
232 Returns a ``concurrent.futures.Future`` that will resolve to the
233 result of that command request. That exact value is left up to
233 result of that command request. That exact value is left up to
234 the implementation and possibly varies by command.
234 the implementation and possibly varies by command.
235
235
236 Not all commands can coexist with other commands in an executor
236 Not all commands can coexist with other commands in an executor
237 instance: it depends on the underlying wire protocol transport being
237 instance: it depends on the underlying wire protocol transport being
238 used and the command itself.
238 used and the command itself.
239
239
240 Implementations MAY call ``sendcommands()`` automatically if the
240 Implementations MAY call ``sendcommands()`` automatically if the
241 requested command can not coexist with other commands in this executor.
241 requested command can not coexist with other commands in this executor.
242
242
243 Implementations MAY call ``sendcommands()`` automatically when the
243 Implementations MAY call ``sendcommands()`` automatically when the
244 future's ``result()`` is called. So, consumers using multiple
244 future's ``result()`` is called. So, consumers using multiple
245 commands with an executor MUST ensure that ``result()`` is not called
245 commands with an executor MUST ensure that ``result()`` is not called
246 until all command requests have been issued.
246 until all command requests have been issued.
247 """
247 """
248
248
249 def sendcommands():
249 def sendcommands():
250 """Trigger submission of queued command requests.
250 """Trigger submission of queued command requests.
251
251
252 Not all transports submit commands as soon as they are requested to
252 Not all transports submit commands as soon as they are requested to
253 run. When called, this method forces queued command requests to be
253 run. When called, this method forces queued command requests to be
254 issued. It will no-op if all commands have already been sent.
254 issued. It will no-op if all commands have already been sent.
255
255
256 When called, no more new commands may be issued with this executor.
256 When called, no more new commands may be issued with this executor.
257 """
257 """
258
258
259 def close():
259 def close():
260 """Signal that this command request is finished.
260 """Signal that this command request is finished.
261
261
262 When called, no more new commands may be issued. All outstanding
262 When called, no more new commands may be issued. All outstanding
263 commands that have previously been issued are waited on before
263 commands that have previously been issued are waited on before
264 returning. This not only includes waiting for the futures to resolve,
264 returning. This not only includes waiting for the futures to resolve,
265 but also waiting for all response data to arrive. In other words,
265 but also waiting for all response data to arrive. In other words,
266 calling this waits for all on-wire state for issued command requests
266 calling this waits for all on-wire state for issued command requests
267 to finish.
267 to finish.
268
268
269 When used as a context manager, this method is called when exiting the
269 When used as a context manager, this method is called when exiting the
270 context manager.
270 context manager.
271
271
272 This method may call ``sendcommands()`` if there are buffered commands.
272 This method may call ``sendcommands()`` if there are buffered commands.
273 """
273 """
274
274
275 class ipeerrequests(interfaceutil.Interface):
275 class ipeerrequests(interfaceutil.Interface):
276 """Interface for executing commands on a peer."""
276 """Interface for executing commands on a peer."""
277
277
278 def commandexecutor():
278 def commandexecutor():
279 """A context manager that resolves to an ipeercommandexecutor.
279 """A context manager that resolves to an ipeercommandexecutor.
280
280
281 The object this resolves to can be used to issue command requests
281 The object this resolves to can be used to issue command requests
282 to the peer.
282 to the peer.
283
283
284 Callers should call its ``callcommand`` method to issue command
284 Callers should call its ``callcommand`` method to issue command
285 requests.
285 requests.
286
286
287 A new executor should be obtained for each distinct set of commands
287 A new executor should be obtained for each distinct set of commands
288 (possibly just a single command) that the consumer wants to execute
288 (possibly just a single command) that the consumer wants to execute
289 as part of a single operation or round trip. This is because some
289 as part of a single operation or round trip. This is because some
290 peers are half-duplex and/or don't support persistent connections.
290 peers are half-duplex and/or don't support persistent connections.
291 e.g. in the case of HTTP peers, commands sent to an executor represent
291 e.g. in the case of HTTP peers, commands sent to an executor represent
292 a single HTTP request. While some peers may support multiple command
292 a single HTTP request. While some peers may support multiple command
293 sends over the wire per executor, consumers need to code to the least
293 sends over the wire per executor, consumers need to code to the least
294 capable peer. So it should be assumed that command executors buffer
294 capable peer. So it should be assumed that command executors buffer
295 called commands until they are told to send them and that each
295 called commands until they are told to send them and that each
296 command executor could result in a new connection or wire-level request
296 command executor could result in a new connection or wire-level request
297 being issued.
297 being issued.
298 """
298 """
299
299
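# A hedged usage sketch of the executor pattern described above (``peer`` and
# ``nodes`` are hypothetical; the command names are only illustrative):
#
#     def headsandknown(peer, nodes):
#         with peer.commandexecutor() as e:
#             fheads = e.callcommand(b'heads', {})
#             fknown = e.callcommand(b'known', {b'nodes': nodes})
#         # close() has waited on all outstanding requests by now, so the
#         # futures can be resolved safely.
#         return fheads.result(), fknown.result()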
300 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
300 class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
301 """Unified interface for peer repositories.
301 """Unified interface for peer repositories.
302
302
303 All peer instances must conform to this interface.
303 All peer instances must conform to this interface.
304 """
304 """
305
305
306 @interfaceutil.implementer(ipeerbase)
306 @interfaceutil.implementer(ipeerbase)
307 class peer(object):
307 class peer(object):
308 """Base class for peer repositories."""
308 """Base class for peer repositories."""
309
309
310 def capable(self, name):
310 def capable(self, name):
311 caps = self.capabilities()
311 caps = self.capabilities()
312 if name in caps:
312 if name in caps:
313 return True
313 return True
314
314
315 name = '%s=' % name
315 name = '%s=' % name
316 for cap in caps:
316 for cap in caps:
317 if cap.startswith(name):
317 if cap.startswith(name):
318 return cap[len(name):]
318 return cap[len(name):]
319
319
320 return False
320 return False
321
321
322 def requirecap(self, name, purpose):
322 def requirecap(self, name, purpose):
323 if self.capable(name):
323 if self.capable(name):
324 return
324 return
325
325
326 raise error.CapabilityError(
326 raise error.CapabilityError(
327 _('cannot %s; remote repository does not support the %r '
327 _('cannot %s; remote repository does not support the %r '
328 'capability') % (purpose, name))
328 'capability') % (purpose, name))
329
329
330 class iverifyproblem(interfaceutil.Interface):
330 class iverifyproblem(interfaceutil.Interface):
331 """Represents a problem with the integrity of the repository.
331 """Represents a problem with the integrity of the repository.
332
332
333 Instances of this interface are emitted to describe an integrity issue
333 Instances of this interface are emitted to describe an integrity issue
334 with a repository (e.g. corrupt storage, missing data, etc).
334 with a repository (e.g. corrupt storage, missing data, etc).
335
335
336 Instances are essentially messages associated with severity.
336 Instances are essentially messages associated with severity.
337 """
337 """
338 warning = interfaceutil.Attribute(
338 warning = interfaceutil.Attribute(
339 """Message indicating a non-fatal problem.""")
339 """Message indicating a non-fatal problem.""")
340
340
341 error = interfaceutil.Attribute(
341 error = interfaceutil.Attribute(
342 """Message indicating a fatal problem.""")
342 """Message indicating a fatal problem.""")
343
343
344 class irevisiondelta(interfaceutil.Interface):
344 class irevisiondelta(interfaceutil.Interface):
345 """Represents a delta between one revision and another.
345 """Represents a delta between one revision and another.
346
346
347 Instances convey enough information to allow a revision to be exchanged
347 Instances convey enough information to allow a revision to be exchanged
348 with another repository.
348 with another repository.
349
349
350 Instances represent the fulltext revision data or a delta against
350 Instances represent the fulltext revision data or a delta against
351 another revision. Therefore the ``revision`` and ``delta`` attributes
351 another revision. Therefore the ``revision`` and ``delta`` attributes
352 are mutually exclusive.
352 are mutually exclusive.
353
353
354 Typically used for changegroup generation.
354 Typically used for changegroup generation.
355 """
355 """
356
356
357 node = interfaceutil.Attribute(
357 node = interfaceutil.Attribute(
358 """20 byte node of this revision.""")
358 """20 byte node of this revision.""")
359
359
360 p1node = interfaceutil.Attribute(
360 p1node = interfaceutil.Attribute(
361 """20 byte node of 1st parent of this revision.""")
361 """20 byte node of 1st parent of this revision.""")
362
362
363 p2node = interfaceutil.Attribute(
363 p2node = interfaceutil.Attribute(
364 """20 byte node of 2nd parent of this revision.""")
364 """20 byte node of 2nd parent of this revision.""")
365
365
366 linknode = interfaceutil.Attribute(
366 linknode = interfaceutil.Attribute(
367 """20 byte node of the changelog revision this node is linked to.""")
367 """20 byte node of the changelog revision this node is linked to.""")
368
368
369 flags = interfaceutil.Attribute(
369 flags = interfaceutil.Attribute(
370 """2 bytes of integer flags that apply to this revision.""")
370 """2 bytes of integer flags that apply to this revision.""")
371
371
372 basenode = interfaceutil.Attribute(
372 basenode = interfaceutil.Attribute(
373 """20 byte node of the revision this data is a delta against.
373 """20 byte node of the revision this data is a delta against.
374
374
375 ``nullid`` indicates that the revision is a full revision and not
375 ``nullid`` indicates that the revision is a full revision and not
376 a delta.
376 a delta.
377 """)
377 """)
378
378
379 baserevisionsize = interfaceutil.Attribute(
379 baserevisionsize = interfaceutil.Attribute(
380 """Size of base revision this delta is against.
380 """Size of base revision this delta is against.
381
381
382 May be ``None`` if ``basenode`` is ``nullid``.
382 May be ``None`` if ``basenode`` is ``nullid``.
383 """)
383 """)
384
384
385 revision = interfaceutil.Attribute(
385 revision = interfaceutil.Attribute(
386 """Raw fulltext of revision data for this node.""")
386 """Raw fulltext of revision data for this node.""")
387
387
388 delta = interfaceutil.Attribute(
388 delta = interfaceutil.Attribute(
389 """Delta between ``basenode`` and ``node``.
389 """Delta between ``basenode`` and ``node``.
390
390
391 Stored in the bdiff delta format.
391 Stored in the bdiff delta format.
392 """)
392 """)
393
393
394 class irevisiondeltarequest(interfaceutil.Interface):
394 class irevisiondeltarequest(interfaceutil.Interface):
395 """Represents a request to generate an ``irevisiondelta``."""
395 """Represents a request to generate an ``irevisiondelta``."""
396
396
397 node = interfaceutil.Attribute(
397 node = interfaceutil.Attribute(
398 """20 byte node of revision being requested.""")
398 """20 byte node of revision being requested.""")
399
399
400 p1node = interfaceutil.Attribute(
400 p1node = interfaceutil.Attribute(
401 """20 byte node of 1st parent of revision.""")
401 """20 byte node of 1st parent of revision.""")
402
402
403 p2node = interfaceutil.Attribute(
403 p2node = interfaceutil.Attribute(
404 """20 byte node of 2nd parent of revision.""")
404 """20 byte node of 2nd parent of revision.""")
405
405
406 linknode = interfaceutil.Attribute(
406 linknode = interfaceutil.Attribute(
407 """20 byte node to store in ``linknode`` attribute.""")
407 """20 byte node to store in ``linknode`` attribute.""")
408
408
409 basenode = interfaceutil.Attribute(
409 basenode = interfaceutil.Attribute(
410 """Base revision that delta should be generated against.
410 """Base revision that delta should be generated against.
411
411
412 If ``nullid``, the derived ``irevisiondelta`` should have its
412 If ``nullid``, the derived ``irevisiondelta`` should have its
413 ``revision`` field populated and no delta should be generated.
413 ``revision`` field populated and no delta should be generated.
414
414
415 If ``None``, the delta may be generated against any revision that
415 If ``None``, the delta may be generated against any revision that
416 is an ancestor of this revision. Or a full revision may be used.
416 is an ancestor of this revision. Or a full revision may be used.
417
417
418 If any other value, the delta should be produced against that
418 If any other value, the delta should be produced against that
419 revision.
419 revision.
420 """)
420 """)
421
421
422 ellipsis = interfaceutil.Attribute(
422 ellipsis = interfaceutil.Attribute(
423 """Boolean on whether the ellipsis flag should be set.""")
423 """Boolean on whether the ellipsis flag should be set.""")
424
424
425 class ifilerevisionssequence(interfaceutil.Interface):
425 class ifilerevisionssequence(interfaceutil.Interface):
426 """Contains index data for all revisions of a file.
426 """Contains index data for all revisions of a file.
427
427
428 Types implementing this behave like lists of tuples. The index
428 Types implementing this behave like lists of tuples. The index
429 in the list corresponds to the revision number. The values contain
429 in the list corresponds to the revision number. The values contain
430 index metadata.
430 index metadata.
431
431
432 The *null* revision (revision number -1) is always the last item
432 The *null* revision (revision number -1) is always the last item
433 in the index.
433 in the index.
434 """
434 """
435
435
436 def __len__():
436 def __len__():
437 """The total number of revisions."""
437 """The total number of revisions."""
438
438
439 def __getitem__(rev):
439 def __getitem__(rev):
440 """Returns the object having a specific revision number.
440 """Returns the object having a specific revision number.
441
441
442 Returns an 8-tuple with the following fields:
442 Returns an 8-tuple with the following fields:
443
443
444 offset+flags
444 offset+flags
445 Contains the offset and flags for the revision. 64-bit unsigned
445 Contains the offset and flags for the revision. 64-bit unsigned
446 integer where first 6 bytes are the offset and the next 2 bytes
446 integer where first 6 bytes are the offset and the next 2 bytes
447 are flags. The offset can be 0 if it is not used by the store.
447 are flags. The offset can be 0 if it is not used by the store.
448 compressed size
448 compressed size
449 Size of the revision data in the store. It can be 0 if it isn't
449 Size of the revision data in the store. It can be 0 if it isn't
450 needed by the store.
450 needed by the store.
451 uncompressed size
451 uncompressed size
452 Fulltext size. It can be 0 if it isn't needed by the store.
452 Fulltext size. It can be 0 if it isn't needed by the store.
453 base revision
453 base revision
454 Revision number of revision the delta for storage is encoded
454 Revision number of revision the delta for storage is encoded
455 against. -1 indicates not encoded against a base revision.
455 against. -1 indicates not encoded against a base revision.
456 link revision
456 link revision
457 Revision number of changelog revision this entry is related to.
457 Revision number of changelog revision this entry is related to.
458 p1 revision
458 p1 revision
459 Revision number of 1st parent. -1 if no 1st parent.
459 Revision number of 1st parent. -1 if no 1st parent.
460 p2 revision
460 p2 revision
461 Revision number of 2nd parent. -1 if no 1st parent.
461 Revision number of 2nd parent. -1 if no 1st parent.
462 node
462 node
463 Binary node value for this revision number.
463 Binary node value for this revision number.
464
464
465 Negative values should index off the end of the sequence. ``-1``
465 Negative values should index off the end of the sequence. ``-1``
466 should return the null revision. ``-2`` should return the most
466 should return the null revision. ``-2`` should return the most
467 recent revision.
467 recent revision.
468 """
468 """
469
469
470 def __contains__(rev):
470 def __contains__(rev):
471 """Whether a revision number exists."""
471 """Whether a revision number exists."""
472
472
473 def insert(self, i, entry):
473 def insert(self, i, entry):
474 """Add an item to the index at specific revision."""
474 """Add an item to the index at specific revision."""
475
475
476 class ifileindex(interfaceutil.Interface):
476 class ifileindex(interfaceutil.Interface):
477 """Storage interface for index data of a single file.
477 """Storage interface for index data of a single file.
478
478
479 File storage data is divided into index metadata and data storage.
479 File storage data is divided into index metadata and data storage.
480 This interface defines the index portion of the interface.
480 This interface defines the index portion of the interface.
481
481
482 The index logically consists of:
482 The index logically consists of:
483
483
484 * A mapping between revision numbers and nodes.
484 * A mapping between revision numbers and nodes.
485 * DAG data (storing and querying the relationship between nodes).
485 * DAG data (storing and querying the relationship between nodes).
486 * Metadata to facilitate storage.
486 * Metadata to facilitate storage.
487 """
487 """
488 -index = interfaceutil.Attribute(
489 -"""An ``ifilerevisionssequence`` instance.""")
490 -
491 def __len__():
488 def __len__():
492 """Obtain the number of revisions stored for this file."""
489 """Obtain the number of revisions stored for this file."""
493
490
494 def __iter__():
491 def __iter__():
495 """Iterate over revision numbers for this file."""
492 """Iterate over revision numbers for this file."""
496
493
497 def revs(start=0, stop=None):
494 def revs(start=0, stop=None):
498 """Iterate over revision numbers for this file, with control."""
495 """Iterate over revision numbers for this file, with control."""
499
496
500 def parents(node):
497 def parents(node):
501 """Returns a 2-tuple of parent nodes for a revision.
498 """Returns a 2-tuple of parent nodes for a revision.
502
499
503 Values will be ``nullid`` if the parent is empty.
500 Values will be ``nullid`` if the parent is empty.
504 """
501 """
505
502
506 def parentrevs(rev):
503 def parentrevs(rev):
507 """Like parents() but operates on revision numbers."""
504 """Like parents() but operates on revision numbers."""
508
505
509 def rev(node):
506 def rev(node):
510 """Obtain the revision number given a node.
507 """Obtain the revision number given a node.
511
508
512 Raises ``error.LookupError`` if the node is not known.
509 Raises ``error.LookupError`` if the node is not known.
513 """
510 """
514
511
515 def node(rev):
512 def node(rev):
516 """Obtain the node value given a revision number.
513 """Obtain the node value given a revision number.
517
514
518 Raises ``IndexError`` if the node is not known.
515 Raises ``IndexError`` if the node is not known.
519 """
516 """
520
517
521 def lookup(node):
518 def lookup(node):
522 """Attempt to resolve a value to a node.
519 """Attempt to resolve a value to a node.
523
520
524 Value can be a binary node, hex node, revision number, or a string
521 Value can be a binary node, hex node, revision number, or a string
525 that can be converted to an integer.
522 that can be converted to an integer.
526
523
527 Raises ``error.LookupError`` if a node could not be resolved.
524 Raises ``error.LookupError`` if a node could not be resolved.
528 """
525 """
529
526
530 def linkrev(rev):
527 def linkrev(rev):
531 """Obtain the changeset revision number a revision is linked to."""
528 """Obtain the changeset revision number a revision is linked to."""
532
529
533 def flags(rev):
530 def flags(rev):
534 """Obtain flags used to affect storage of a revision."""
531 """Obtain flags used to affect storage of a revision."""
535
532
536 def iscensored(rev):
533 def iscensored(rev):
537 """Return whether a revision's content has been censored."""
534 """Return whether a revision's content has been censored."""
538
535
539 def commonancestorsheads(node1, node2):
536 def commonancestorsheads(node1, node2):
540 """Obtain an iterable of nodes containing heads of common ancestors.
537 """Obtain an iterable of nodes containing heads of common ancestors.
541
538
542 See ``ancestor.commonancestorsheads()``.
539 See ``ancestor.commonancestorsheads()``.
543 """
540 """
544
541
545 def descendants(revs):
542 def descendants(revs):
546 """Obtain descendant revision numbers for a set of revision numbers.
543 """Obtain descendant revision numbers for a set of revision numbers.
547
544
548 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
545 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
549 """
546 """
550
547
551 def heads(start=None, stop=None):
548 def heads(start=None, stop=None):
552 """Obtain a list of nodes that are DAG heads, with control.
549 """Obtain a list of nodes that are DAG heads, with control.
553
550
554 The set of revisions examined can be limited by specifying
551 The set of revisions examined can be limited by specifying
555 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
552 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
556 iterable of nodes. DAG traversal starts at earlier revision
553 iterable of nodes. DAG traversal starts at earlier revision
557 ``start`` and iterates forward until any node in ``stop`` is
554 ``start`` and iterates forward until any node in ``stop`` is
558 encountered.
555 encountered.
559 """
556 """
560
557
561 def children(node):
558 def children(node):
562 """Obtain nodes that are children of a node.
559 """Obtain nodes that are children of a node.
563
560
564 Returns a list of nodes.
561 Returns a list of nodes.
565 """
562 """
566
563
567 def deltaparent(rev):
564 def deltaparent(rev):
568 """"Return the revision that is a suitable parent to delta against."""
565 """"Return the revision that is a suitable parent to delta against."""
569
566
570 class ifiledata(interfaceutil.Interface):
567 class ifiledata(interfaceutil.Interface):
571 """Storage interface for data storage of a specific file.
568 """Storage interface for data storage of a specific file.
572
569
573 This complements ``ifileindex`` and provides an interface for accessing
570 This complements ``ifileindex`` and provides an interface for accessing
574 data for a tracked file.
571 data for a tracked file.
575 """
572 """
576 def rawsize(rev):
573 def rawsize(rev):
577 """The size of the fulltext data for a revision as stored."""
574 """The size of the fulltext data for a revision as stored."""
578
575
579 def size(rev):
576 def size(rev):
580 """Obtain the fulltext size of file data.
577 """Obtain the fulltext size of file data.
581
578
582 Any metadata is excluded from size measurements. Use ``rawsize()`` if
579 Any metadata is excluded from size measurements. Use ``rawsize()`` if
583 metadata size is important.
580 metadata size is important.
584 """
581 """
585
582
586 def checkhash(fulltext, node, p1=None, p2=None, rev=None):
583 def checkhash(fulltext, node, p1=None, p2=None, rev=None):
587 """Validate the stored hash of a given fulltext and node.
584 """Validate the stored hash of a given fulltext and node.
588
585
589 Raises ``error.StorageError`` is hash validation fails.
586 Raises ``error.StorageError`` is hash validation fails.
590 """
587 """
591
588
592 def revision(node, raw=False):
589 def revision(node, raw=False):
593 """"Obtain fulltext data for a node.
590 """"Obtain fulltext data for a node.
594
591
595 By default, any storage transformations are applied before the data
592 By default, any storage transformations are applied before the data
596 is returned. If ``raw`` is True, non-raw storage transformations
593 is returned. If ``raw`` is True, non-raw storage transformations
597 are not applied.
594 are not applied.
598
595
599 The fulltext data may contain a header containing metadata. Most
596 The fulltext data may contain a header containing metadata. Most
600 consumers should use ``read()`` to obtain the actual file data.
597 consumers should use ``read()`` to obtain the actual file data.
601 """
598 """
602
599
603 def read(node):
600 def read(node):
604 """Resolve file fulltext data.
601 """Resolve file fulltext data.
605
602
606 This is similar to ``revision()`` except any metadata in the data
603 This is similar to ``revision()`` except any metadata in the data
607 headers is stripped.
604 headers is stripped.
608 """
605 """
609
606
610 def renamed(node):
607 def renamed(node):
611 """Obtain copy metadata for a node.
608 """Obtain copy metadata for a node.
612
609
613 Returns ``False`` if no copy metadata is stored or a 2-tuple of
610 Returns ``False`` if no copy metadata is stored or a 2-tuple of
614 (path, node) from which this revision was copied.
611 (path, node) from which this revision was copied.
615 """
612 """
616
613
617 def cmp(node, fulltext):
614 def cmp(node, fulltext):
618 """Compare fulltext to another revision.
615 """Compare fulltext to another revision.
619
616
620 Returns True if the fulltext is different from what is stored.
617 Returns True if the fulltext is different from what is stored.
621
618
622 This takes copy metadata into account.
619 This takes copy metadata into account.
623
620
624 TODO better document the copy metadata and censoring logic.
621 TODO better document the copy metadata and censoring logic.
625 """
622 """
626
623
627 def revdiff(rev1, rev2):
624 def revdiff(rev1, rev2):
628 """Obtain a delta between two revision numbers.
625 """Obtain a delta between two revision numbers.
629
626
630 Operates on raw data in the store (``revision(node, raw=True)``).
627 Operates on raw data in the store (``revision(node, raw=True)``).
631
628
632 The returned data is the result of ``bdiff.bdiff`` on the raw
629 The returned data is the result of ``bdiff.bdiff`` on the raw
633 revision data.
630 revision data.
634 """
631 """
635
632
636 def emitrevisiondeltas(requests):
633 def emitrevisiondeltas(requests):
637 """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
634 """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
638
635
639 Given an iterable of objects conforming to the ``irevisiondeltarequest``
636 Given an iterable of objects conforming to the ``irevisiondeltarequest``
640 interface, emits objects conforming to the ``irevisiondelta``
637 interface, emits objects conforming to the ``irevisiondelta``
641 interface.
638 interface.
642
639
643 This method is a generator.
640 This method is a generator.
644
641
645 ``irevisiondelta`` objects should be emitted in the same order as the
642 ``irevisiondelta`` objects should be emitted in the same order as the
646 ``irevisiondeltarequest`` objects that were passed in.
643 ``irevisiondeltarequest`` objects that were passed in.
647
644
648 The emitted objects MUST conform to the results of
645 The emitted objects MUST conform to the results of
649 ``irevisiondeltarequest``. Namely, they must respect any requests
646 ``irevisiondeltarequest``. Namely, they must respect any requests
650 for building a delta from a specific ``basenode`` if defined.
647 for building a delta from a specific ``basenode`` if defined.
651
648
652 When sending deltas, implementations must take into account whether
649 When sending deltas, implementations must take into account whether
653 the client has the base delta before encoding a delta against that
650 the client has the base delta before encoding a delta against that
654 revision. A revision encountered previously in ``requests`` is
651 revision. A revision encountered previously in ``requests`` is
655 always a suitable base revision. An example of a bad delta is a delta
652 always a suitable base revision. An example of a bad delta is a delta
656 against a non-ancestor revision. Another example of a bad delta is a
653 against a non-ancestor revision. Another example of a bad delta is a
657 delta against a censored revision.
654 delta against a censored revision.
658 """
655 """
659
656
660 class ifilemutation(interfaceutil.Interface):
657 class ifilemutation(interfaceutil.Interface):
661 """Storage interface for mutation events of a tracked file."""
658 """Storage interface for mutation events of a tracked file."""
662
659
663 def add(filedata, meta, transaction, linkrev, p1, p2):
660 def add(filedata, meta, transaction, linkrev, p1, p2):
664 """Add a new revision to the store.
661 """Add a new revision to the store.
665
662
666 Takes file data, dictionary of metadata, a transaction, linkrev,
663 Takes file data, dictionary of metadata, a transaction, linkrev,
667 and parent nodes.
664 and parent nodes.
668
665
669 Returns the node that was added.
666 Returns the node that was added.
670
667
671 May no-op if a revision matching the supplied data is already stored.
668 May no-op if a revision matching the supplied data is already stored.
672 """
669 """
673
670
674 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
671 def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
675 flags=0, cachedelta=None):
672 flags=0, cachedelta=None):
676 """Add a new revision to the store.
673 """Add a new revision to the store.
677
674
678 This is similar to ``add()`` except it operates at a lower level.
675 This is similar to ``add()`` except it operates at a lower level.
679
676
680 The data passed in already contains a metadata header, if any.
677 The data passed in already contains a metadata header, if any.
681
678
682 ``node`` and ``flags`` can be used to define the expected node and
679 ``node`` and ``flags`` can be used to define the expected node and
683 the flags to use with storage.
680 the flags to use with storage.
684
681
685 ``add()`` is usually called when adding files from e.g. the working
682 ``add()`` is usually called when adding files from e.g. the working
686 directory. ``addrevision()`` is often called by ``add()`` and for
683 directory. ``addrevision()`` is often called by ``add()`` and for
687 scenarios where revision data has already been computed, such as when
684 scenarios where revision data has already been computed, such as when
688 applying raw data from a peer repo.
685 applying raw data from a peer repo.
689 """
686 """
690
687
691 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
688 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
692 """Process a series of deltas for storage.
689 """Process a series of deltas for storage.
693
690
694 ``deltas`` is an iterable of 7-tuples of
691 ``deltas`` is an iterable of 7-tuples of
695 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
692 (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
696 to add.
693 to add.
697
694
698 The ``delta`` field contains ``mpatch`` data to apply to a base
695 The ``delta`` field contains ``mpatch`` data to apply to a base
699 revision, identified by ``deltabase``. The base node can be
696 revision, identified by ``deltabase``. The base node can be
700 ``nullid``, in which case the header from the delta can be ignored
697 ``nullid``, in which case the header from the delta can be ignored
701 and the delta used as the fulltext.
698 and the delta used as the fulltext.
702
699
703 ``addrevisioncb`` should be called for each node as it is committed.
700 ``addrevisioncb`` should be called for each node as it is committed.
704
701
705 Returns a list of nodes that were processed. A node will be in the list
702 Returns a list of nodes that were processed. A node will be in the list
706 even if it existed in the store previously.
703 even if it existed in the store previously.
707 """
704 """
708
705
709 def censorrevision(tr, node, tombstone=b''):
706 def censorrevision(tr, node, tombstone=b''):
710 """Remove the content of a single revision.
707 """Remove the content of a single revision.
711
708
712 The specified ``node`` will have its content purged from storage.
709 The specified ``node`` will have its content purged from storage.
713 Future attempts to access the revision data for this node will
710 Future attempts to access the revision data for this node will
714 result in failure.
711 result in failure.
715
712
716 A ``tombstone`` message can optionally be stored. This message may be
713 A ``tombstone`` message can optionally be stored. This message may be
717 displayed to users when they attempt to access the missing revision
714 displayed to users when they attempt to access the missing revision
718 data.
715 data.
719
716
720 Storage backends may have stored deltas against the previous content
717 Storage backends may have stored deltas against the previous content
721 in this revision. As part of censoring a revision, these storage
718 in this revision. As part of censoring a revision, these storage
722 backends are expected to rewrite any internally stored deltas such
719 backends are expected to rewrite any internally stored deltas such
723 that they no longer reference the deleted content.
720 that they no longer reference the deleted content.
724 """
721 """
725
722
726 def getstrippoint(minlink):
723 def getstrippoint(minlink):
727 """Find the minimum revision that must be stripped to strip a linkrev.
724 """Find the minimum revision that must be stripped to strip a linkrev.
728
725
729 Returns a 2-tuple containing the minimum revision number and a set
726 Returns a 2-tuple containing the minimum revision number and a set
730 of all revision numbers that would be broken by this strip.
727 of all revision numbers that would be broken by this strip.
731
728
732 TODO this is highly revlog centric and should be abstracted into
729 TODO this is highly revlog centric and should be abstracted into
733 a higher-level deletion API. ``repair.strip()`` relies on this.
730 a higher-level deletion API. ``repair.strip()`` relies on this.
734 """
731 """
735
732
736 def strip(minlink, transaction):
733 def strip(minlink, transaction):
737 """Remove storage of items starting at a linkrev.
734 """Remove storage of items starting at a linkrev.
738
735
739 This uses ``getstrippoint()`` to determine the first node to remove.
736 This uses ``getstrippoint()`` to determine the first node to remove.
740 Then it effectively truncates storage for all revisions after that.
737 Then it effectively truncates storage for all revisions after that.
741
738
742 TODO this is highly revlog centric and should be abstracted into a
739 TODO this is highly revlog centric and should be abstracted into a
743 higher-level deletion API.
740 higher-level deletion API.
744 """
741 """
745
742
746 class ifilestorage(ifileindex, ifiledata, ifilemutation):
743 class ifilestorage(ifileindex, ifiledata, ifilemutation):
747 """Complete storage interface for a single tracked file."""
744 """Complete storage interface for a single tracked file."""
748
745
749 _generaldelta = interfaceutil.Attribute(
746 _generaldelta = interfaceutil.Attribute(
750 """Whether deltas can be against any parent revision.
747 """Whether deltas can be against any parent revision.
751
748
752 TODO this is used by changegroup code and it could probably be
749 TODO this is used by changegroup code and it could probably be
753 folded into another API.
750 folded into another API.
754 """)
751 """)
755
752
756 def files():
753 def files():
757 """Obtain paths that are backing storage for this file.
754 """Obtain paths that are backing storage for this file.
758
755
759 TODO this is used heavily by verify code and there should probably
756 TODO this is used heavily by verify code and there should probably
760 be a better API for that.
757 be a better API for that.
761 """
758 """
762
759
763 def verifyintegrity(state):
760 def verifyintegrity(state):
764 """Verifies the integrity of file storage.
761 """Verifies the integrity of file storage.
765
762
766 ``state`` is a dict holding state of the verifier process. It can be
763 ``state`` is a dict holding state of the verifier process. It can be
767 used to communicate data between invocations of multiple storage
764 used to communicate data between invocations of multiple storage
768 primitives.
765 primitives.
769
766
770 The method yields objects conforming to the ``iverifyproblem``
767 The method yields objects conforming to the ``iverifyproblem``
771 interface.
768 interface.
772 """
769 """
773
770
774 class idirs(interfaceutil.Interface):
771 class idirs(interfaceutil.Interface):
775 """Interface representing a collection of directories from paths.
772 """Interface representing a collection of directories from paths.
776
773
777 This interface is essentially a derived data structure representing
774 This interface is essentially a derived data structure representing
778 directories from a collection of paths.
775 directories from a collection of paths.
779 """
776 """
780
777
781 def addpath(path):
778 def addpath(path):
782 """Add a path to the collection.
779 """Add a path to the collection.
783
780
784 All directories in the path will be added to the collection.
781 All directories in the path will be added to the collection.
785 """
782 """
786
783
787 def delpath(path):
784 def delpath(path):
788 """Remove a path from the collection.
785 """Remove a path from the collection.
789
786
790 If the removal was the last path in a particular directory, the
787 If the removal was the last path in a particular directory, the
791 directory is removed from the collection.
788 directory is removed from the collection.
792 """
789 """
793
790
794 def __iter__():
791 def __iter__():
795 """Iterate over the directories in this collection of paths."""
792 """Iterate over the directories in this collection of paths."""
796
793
797 def __contains__(path):
794 def __contains__(path):
798 """Whether a specific directory is in this collection."""
795 """Whether a specific directory is in this collection."""
799
796
800 class imanifestdict(interfaceutil.Interface):
797 class imanifestdict(interfaceutil.Interface):
801 """Interface representing a manifest data structure.
798 """Interface representing a manifest data structure.
802
799
803 A manifest is effectively a dict mapping paths to entries. Each entry
800 A manifest is effectively a dict mapping paths to entries. Each entry
804 consists of a binary node and extra flags affecting that entry.
801 consists of a binary node and extra flags affecting that entry.
805 """
802 """
806
803
807 def __getitem__(path):
804 def __getitem__(path):
808 """Returns the binary node value for a path in the manifest.
805 """Returns the binary node value for a path in the manifest.
809
806
810 Raises ``KeyError`` if the path does not exist in the manifest.
807 Raises ``KeyError`` if the path does not exist in the manifest.
811
808
812 Equivalent to ``self.find(path)[0]``.
809 Equivalent to ``self.find(path)[0]``.
813 """
810 """
814
811
815 def find(path):
812 def find(path):
816 """Returns the entry for a path in the manifest.
813 """Returns the entry for a path in the manifest.
817
814
818 Returns a 2-tuple of (node, flags).
815 Returns a 2-tuple of (node, flags).
819
816
820 Raises ``KeyError`` if the path does not exist in the manifest.
817 Raises ``KeyError`` if the path does not exist in the manifest.
821 """
818 """
822
819
823 def __len__():
820 def __len__():
824 """Return the number of entries in the manifest."""
821 """Return the number of entries in the manifest."""
825
822
826 def __nonzero__():
823 def __nonzero__():
827 """Returns True if the manifest has entries, False otherwise."""
824 """Returns True if the manifest has entries, False otherwise."""
828
825
829 __bool__ = __nonzero__
826 __bool__ = __nonzero__
830
827
831 def __setitem__(path, node):
828 def __setitem__(path, node):
832 """Define the node value for a path in the manifest.
829 """Define the node value for a path in the manifest.
833
830
834 If the path is already in the manifest, its flags will be copied to
831 If the path is already in the manifest, its flags will be copied to
835 the new entry.
832 the new entry.
836 """
833 """
837
834
838 def __contains__(path):
835 def __contains__(path):
839 """Whether a path exists in the manifest."""
836 """Whether a path exists in the manifest."""
840
837
841 def __delitem__(path):
838 def __delitem__(path):
842 """Remove a path from the manifest.
839 """Remove a path from the manifest.
843
840
844 Raises ``KeyError`` if the path is not in the manifest.
841 Raises ``KeyError`` if the path is not in the manifest.
845 """
842 """
846
843
847 def __iter__():
844 def __iter__():
848 """Iterate over paths in the manifest."""
845 """Iterate over paths in the manifest."""
849
846
850 def iterkeys():
847 def iterkeys():
851 """Iterate over paths in the manifest."""
848 """Iterate over paths in the manifest."""
852
849
853 def keys():
850 def keys():
854 """Obtain a list of paths in the manifest."""
851 """Obtain a list of paths in the manifest."""
855
852
856 def filesnotin(other, match=None):
853 def filesnotin(other, match=None):
857 """Obtain the set of paths in this manifest but not in another.
854 """Obtain the set of paths in this manifest but not in another.
858
855
859 ``match`` is an optional matcher function to be applied to both
856 ``match`` is an optional matcher function to be applied to both
860 manifests.
857 manifests.
861
858
862 Returns a set of paths.
859 Returns a set of paths.
863 """
860 """
864
861
865 def dirs():
862 def dirs():
866 """Returns an object implementing the ``idirs`` interface."""
863 """Returns an object implementing the ``idirs`` interface."""
867
864
868 def hasdir(dir):
865 def hasdir(dir):
869 """Returns a bool indicating if a directory is in this manifest."""
866 """Returns a bool indicating if a directory is in this manifest."""
870
867
871 def matches(match):
868 def matches(match):
872 """Generate a new manifest filtered through a matcher.
869 """Generate a new manifest filtered through a matcher.
873
870
874 Returns an object conforming to the ``imanifestdict`` interface.
871 Returns an object conforming to the ``imanifestdict`` interface.
875 """
872 """
876
873
877 def walk(match):
874 def walk(match):
878 """Generator of paths in manifest satisfying a matcher.
875 """Generator of paths in manifest satisfying a matcher.
879
876
880 This is equivalent to ``self.matches(match).iterkeys()`` except a new
877 This is equivalent to ``self.matches(match).iterkeys()`` except a new
881 manifest object is not created.
878 manifest object is not created.
882
879
883 If the matcher has explicit files listed and they don't exist in
880 If the matcher has explicit files listed and they don't exist in
884 the manifest, ``match.bad()`` is called for each missing file.
881 the manifest, ``match.bad()`` is called for each missing file.
885 """
882 """
886
883
887 def diff(other, match=None, clean=False):
884 def diff(other, match=None, clean=False):
888 """Find differences between this manifest and another.
885 """Find differences between this manifest and another.
889
886
890 This manifest is compared to ``other``.
887 This manifest is compared to ``other``.
891
888
892 If ``match`` is provided, the two manifests are filtered against this
889 If ``match`` is provided, the two manifests are filtered against this
893 matcher and only entries satisfying the matcher are compared.
890 matcher and only entries satisfying the matcher are compared.
894
891
895 If ``clean`` is True, unchanged files are included in the returned
892 If ``clean`` is True, unchanged files are included in the returned
896 object.
893 object.
897
894
898 Returns a dict whose keys are paths and whose values are 2-tuples of
895 Returns a dict whose keys are paths and whose values are 2-tuples of
899 2-tuples of the form ``((node1, flag1), (node2, flag2))``, where
896 2-tuples of the form ``((node1, flag1), (node2, flag2))``, where
900 ``(node1, flag1)`` is the node and flags for this manifest and
897 ``(node1, flag1)`` is the node and flags for this manifest and
901 ``(node2, flag2)`` is the node and flags for the other manifest.
898 ``(node2, flag2)`` is the node and flags for the other manifest.
902 """
899 """
903
900
904 def setflag(path, flag):
901 def setflag(path, flag):
905 """Set the flag value for a given path.
902 """Set the flag value for a given path.
906
903
907 Raises ``KeyError`` if the path is not already in the manifest.
904 Raises ``KeyError`` if the path is not already in the manifest.
908 """
905 """
909
906
910 def get(path, default=None):
907 def get(path, default=None):
911 """Obtain the node value for a path or a default value if missing."""
908 """Obtain the node value for a path or a default value if missing."""
912
909
913 def flags(path, default=''):
910 def flags(path, default=''):
914 """Return the flags value for a path or a default value if missing."""
911 """Return the flags value for a path or a default value if missing."""
915
912
916 def copy():
913 def copy():
917 """Return a copy of this manifest."""
914 """Return a copy of this manifest."""
918
915
919 def items():
916 def items():
920 """Returns an iterable of (path, node) for items in this manifest."""
917 """Returns an iterable of (path, node) for items in this manifest."""
921
918
922 def iteritems():
919 def iteritems():
923 """Identical to items()."""
920 """Identical to items()."""
924
921
925 def iterentries():
922 def iterentries():
926 """Returns an iterable of (path, node, flags) for this manifest.
923 """Returns an iterable of (path, node, flags) for this manifest.
927
924
928 Similar to ``iteritems()`` except items are 3-tuples and include
925 Similar to ``iteritems()`` except items are 3-tuples and include
929 flags.
926 flags.
930 """
927 """
931
928
932 def text():
929 def text():
933 """Obtain the raw data representation for this manifest.
930 """Obtain the raw data representation for this manifest.
934
931
935 Result is used to create a manifest revision.
932 Result is used to create a manifest revision.
936 """
933 """
937
934
938 def fastdelta(base, changes):
935 def fastdelta(base, changes):
939 """Obtain a delta between this manifest and another given changes.
936 """Obtain a delta between this manifest and another given changes.
940
937
941 ``base`` is the raw data representation of another manifest.
938 ``base`` is the raw data representation of another manifest.
942
939
943 ``changes`` is an iterable of ``(path, to_delete)``.
940 ``changes`` is an iterable of ``(path, to_delete)``.
944
941
945 Returns a 2-tuple containing ``bytearray(self.text())`` and the
942 Returns a 2-tuple containing ``bytearray(self.text())`` and the
946 delta between ``base`` and this manifest.
943 delta between ``base`` and this manifest.
947 """
944 """
948
945
949 class imanifestrevisionbase(interfaceutil.Interface):
946 class imanifestrevisionbase(interfaceutil.Interface):
950 """Base interface representing a single revision of a manifest.
947 """Base interface representing a single revision of a manifest.
951
948
952 Should not be used as a primary interface: should always be inherited
949 Should not be used as a primary interface: should always be inherited
953 as part of a larger interface.
950 as part of a larger interface.
954 """
951 """
955
952
956 def new():
953 def new():
957 """Obtain a new manifest instance.
954 """Obtain a new manifest instance.
958
955
959 Returns an object conforming to the ``imanifestrevisionwritable``
956 Returns an object conforming to the ``imanifestrevisionwritable``
960 interface. The instance will be associated with the same
957 interface. The instance will be associated with the same
961 ``imanifestlog`` collection as this instance.
958 ``imanifestlog`` collection as this instance.
962 """
959 """
963
960
964 def copy():
961 def copy():
965 """Obtain a copy of this manifest instance.
962 """Obtain a copy of this manifest instance.
966
963
967 Returns an object conforming to the ``imanifestrevisionwritable``
964 Returns an object conforming to the ``imanifestrevisionwritable``
968 interface. The instance will be associated with the same
965 interface. The instance will be associated with the same
969 ``imanifestlog`` collection as this instance.
966 ``imanifestlog`` collection as this instance.
970 """
967 """
971
968
972 def read():
969 def read():
973 """Obtain the parsed manifest data structure.
970 """Obtain the parsed manifest data structure.
974
971
975 The returned object conforms to the ``imanifestdict`` interface.
972 The returned object conforms to the ``imanifestdict`` interface.
976 """
973 """
977
974
978 class imanifestrevisionstored(imanifestrevisionbase):
975 class imanifestrevisionstored(imanifestrevisionbase):
979 """Interface representing a manifest revision committed to storage."""
976 """Interface representing a manifest revision committed to storage."""
980
977
981 def node():
978 def node():
982 """The binary node for this manifest."""
979 """The binary node for this manifest."""
983
980
984 parents = interfaceutil.Attribute(
981 parents = interfaceutil.Attribute(
985 """List of binary nodes that are parents for this manifest revision."""
982 """List of binary nodes that are parents for this manifest revision."""
986 )
983 )
987
984
988 def readdelta(shallow=False):
985 def readdelta(shallow=False):
989 """Obtain the manifest data structure representing changes from parent.
986 """Obtain the manifest data structure representing changes from parent.
990
987
991 This manifest is compared to its first parent. A new manifest representing
988 This manifest is compared to its first parent. A new manifest representing
992 those differences is constructed.
989 those differences is constructed.
993
990
994 The returned object conforms to the ``imanifestdict`` interface.
991 The returned object conforms to the ``imanifestdict`` interface.
995 """
992 """
996
993
997 def readfast(shallow=False):
994 def readfast(shallow=False):
998 """Calls either ``read()`` or ``readdelta()``.
995 """Calls either ``read()`` or ``readdelta()``.
999
996
1000 The faster of the two options is called.
997 The faster of the two options is called.
1001 """
998 """
1002
999
1003 def find(key):
1000 def find(key):
1004 """Calls self.read().find(key)``.
1001 """Calls self.read().find(key)``.
1005
1002
1006 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1003 Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
1007 """
1004 """
1008
1005
1009 class imanifestrevisionwritable(imanifestrevisionbase):
1006 class imanifestrevisionwritable(imanifestrevisionbase):
1010 """Interface representing a manifest revision that can be committed."""
1007 """Interface representing a manifest revision that can be committed."""
1011
1008
1012 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1009 def write(transaction, linkrev, p1node, p2node, added, removed, match=None):
1013 """Add this revision to storage.
1010 """Add this revision to storage.
1014
1011
1015 Takes a transaction object, the changeset revision number it will
1012 Takes a transaction object, the changeset revision number it will
1016 be associated with, its parent nodes, and lists of added and
1013 be associated with, its parent nodes, and lists of added and
1017 removed paths.
1014 removed paths.
1018
1015
1019 If match is provided, storage can choose not to inspect or write out
1016 If match is provided, storage can choose not to inspect or write out
1020 items that do not match. Storage is still required to be able to provide
1017 items that do not match. Storage is still required to be able to provide
1021 the full manifest in the future for any directories written (these
1018 the full manifest in the future for any directories written (these
1022 manifests should not be "narrowed on disk").
1019 manifests should not be "narrowed on disk").
1023
1020
1024 Returns the binary node of the created revision.
1021 Returns the binary node of the created revision.
1025 """
1022 """
1026
1023
1027 class imanifeststorage(interfaceutil.Interface):
1024 class imanifeststorage(interfaceutil.Interface):
1028 """Storage interface for manifest data."""
1025 """Storage interface for manifest data."""
1029
1026
1030 tree = interfaceutil.Attribute(
1027 tree = interfaceutil.Attribute(
1031 """The path to the directory this manifest tracks.
1028 """The path to the directory this manifest tracks.
1032
1029
1033 The empty bytestring represents the root manifest.
1030 The empty bytestring represents the root manifest.
1034 """)
1031 """)
1035
1032
1036 index = interfaceutil.Attribute(
1033 index = interfaceutil.Attribute(
1037 """An ``ifilerevisionssequence`` instance.""")
1034 """An ``ifilerevisionssequence`` instance.""")
1038
1035
1039 indexfile = interfaceutil.Attribute(
1036 indexfile = interfaceutil.Attribute(
1040 """Path of revlog index file.
1037 """Path of revlog index file.
1041
1038
1042 TODO this is revlog specific and should not be exposed.
1039 TODO this is revlog specific and should not be exposed.
1043 """)
1040 """)
1044
1041
1045 opener = interfaceutil.Attribute(
1042 opener = interfaceutil.Attribute(
1046 """VFS opener to use to access underlying files used for storage.
1043 """VFS opener to use to access underlying files used for storage.
1047
1044
1048 TODO this is revlog specific and should not be exposed.
1045 TODO this is revlog specific and should not be exposed.
1049 """)
1046 """)
1050
1047
1051 version = interfaceutil.Attribute(
1048 version = interfaceutil.Attribute(
1052 """Revlog version number.
1049 """Revlog version number.
1053
1050
1054 TODO this is revlog specific and should not be exposed.
1051 TODO this is revlog specific and should not be exposed.
1055 """)
1052 """)
1056
1053
1057 _generaldelta = interfaceutil.Attribute(
1054 _generaldelta = interfaceutil.Attribute(
1058 """Whether generaldelta storage is being used.
1055 """Whether generaldelta storage is being used.
1059
1056
1060 TODO this is revlog specific and should not be exposed.
1057 TODO this is revlog specific and should not be exposed.
1061 """)
1058 """)
1062
1059
1063 fulltextcache = interfaceutil.Attribute(
1060 fulltextcache = interfaceutil.Attribute(
1064 """Dict with cache of fulltexts.
1061 """Dict with cache of fulltexts.
1065
1062
1066 TODO this doesn't feel appropriate for the storage interface.
1063 TODO this doesn't feel appropriate for the storage interface.
1067 """)
1064 """)
1068
1065
1069 def __len__():
1066 def __len__():
1070 """Obtain the number of revisions stored for this manifest."""
1067 """Obtain the number of revisions stored for this manifest."""
1071
1068
1072 def __iter__():
1069 def __iter__():
1073 """Iterate over revision numbers for this manifest."""
1070 """Iterate over revision numbers for this manifest."""
1074
1071
1075 def rev(node):
1072 def rev(node):
1076 """Obtain the revision number given a binary node.
1073 """Obtain the revision number given a binary node.
1077
1074
1078 Raises ``error.LookupError`` if the node is not known.
1075 Raises ``error.LookupError`` if the node is not known.
1079 """
1076 """
1080
1077
1081 def node(rev):
1078 def node(rev):
1082 """Obtain the node value given a revision number.
1079 """Obtain the node value given a revision number.
1083
1080
1084 Raises ``error.LookupError`` if the revision is not known.
1081 Raises ``error.LookupError`` if the revision is not known.
1085 """
1082 """
1086
1083
1087 def lookup(value):
1084 def lookup(value):
1088 """Attempt to resolve a value to a node.
1085 """Attempt to resolve a value to a node.
1089
1086
1090 Value can be a binary node, hex node, revision number, or a bytes
1087 Value can be a binary node, hex node, revision number, or a bytes
1091 that can be converted to an integer.
1088 that can be converted to an integer.
1092
1089
1093 Raises ``error.LookupError`` if a node could not be resolved.
1090 Raises ``error.LookupError`` if a node could not be resolved.
1094
1091
1095 TODO this is only used by debug* commands and can probably be deleted
1092 TODO this is only used by debug* commands and can probably be deleted
1096 easily.
1093 easily.
1097 """
1094 """
1098
1095
1099 def parents(node):
1096 def parents(node):
1100 """Returns a 2-tuple of parent nodes for a node.
1097 """Returns a 2-tuple of parent nodes for a node.
1101
1098
1102 Values will be ``nullid`` if the parent is empty.
1099 Values will be ``nullid`` if the parent is empty.
1103 """
1100 """
1104
1101
1105 def parentrevs(rev):
1102 def parentrevs(rev):
1106 """Like parents() but operates on revision numbers."""
1103 """Like parents() but operates on revision numbers."""
1107
1104
1108 def linkrev(rev):
1105 def linkrev(rev):
1109 """Obtain the changeset revision number a revision is linked to."""
1106 """Obtain the changeset revision number a revision is linked to."""
1110
1107
1111 def revision(node, _df=None, raw=False):
1108 def revision(node, _df=None, raw=False):
1112 """Obtain fulltext data for a node."""
1109 """Obtain fulltext data for a node."""
1113
1110
1114 def revdiff(rev1, rev2):
1111 def revdiff(rev1, rev2):
1115 """Obtain a delta between two revision numbers.
1112 """Obtain a delta between two revision numbers.
1116
1113
1117 The returned data is the result of ``bdiff.bdiff()`` on the raw
1114 The returned data is the result of ``bdiff.bdiff()`` on the raw
1118 revision data.
1115 revision data.
1119 """
1116 """
1120
1117
1121 def cmp(node, fulltext):
1118 def cmp(node, fulltext):
1122 """Compare fulltext to another revision.
1119 """Compare fulltext to another revision.
1123
1120
1124 Returns True if the fulltext is different from what is stored.
1121 Returns True if the fulltext is different from what is stored.
1125 """
1122 """
1126
1123
1127 def emitrevisiondeltas(requests):
1124 def emitrevisiondeltas(requests):
1128 """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
1125 """Produce ``irevisiondelta`` from ``irevisiondeltarequest``s.
1129
1126
1130 See the documentation for ``ifiledata`` for more.
1127 See the documentation for ``ifiledata`` for more.
1131 """
1128 """
1132
1129
1133 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1130 def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
1134 """Process a series of deltas for storage.
1131 """Process a series of deltas for storage.
1135
1132
1136 See the documentation in ``ifilemutation`` for more.
1133 See the documentation in ``ifilemutation`` for more.
1137 """
1134 """
1138
1135
1139 def rawsize(rev):
1136 def rawsize(rev):
1140 """Obtain the size of tracked data.
1137 """Obtain the size of tracked data.
1141
1138
1142 This is equivalent to ``len(m.revision(node, raw=True))``.
1139 This is equivalent to ``len(m.revision(node, raw=True))``.
1143
1140
1144 TODO this method is only used by upgrade code and may be removed.
1141 TODO this method is only used by upgrade code and may be removed.
1145 """
1142 """
1146
1143
1147 def getstrippoint(minlink):
1144 def getstrippoint(minlink):
1148 """Find minimum revision that must be stripped to strip a linkrev.
1145 """Find minimum revision that must be stripped to strip a linkrev.
1149
1146
1150 See the documentation in ``ifilemutation`` for more.
1147 See the documentation in ``ifilemutation`` for more.
1151 """
1148 """
1152
1149
1153 def strip(minlink, transaction):
1150 def strip(minlink, transaction):
1154 """Remove storage of items starting at a linkrev.
1151 """Remove storage of items starting at a linkrev.
1155
1152
1156 See the documentation in ``ifilemutation`` for more.
1153 See the documentation in ``ifilemutation`` for more.
1157 """
1154 """
1158
1155
1159 def checksize():
1156 def checksize():
1160 """Obtain the expected sizes of backing files.
1157 """Obtain the expected sizes of backing files.
1161
1158
1162 TODO this is used by verify and it should not be part of the interface.
1159 TODO this is used by verify and it should not be part of the interface.
1163 """
1160 """
1164
1161
1165 def files():
1162 def files():
1166 """Obtain paths that are backing storage for this manifest.
1163 """Obtain paths that are backing storage for this manifest.
1167
1164
1168 TODO this is used by verify and there should probably be a better API
1165 TODO this is used by verify and there should probably be a better API
1169 for this functionality.
1166 for this functionality.
1170 """
1167 """
1171
1168
1172 def deltaparent(rev):
1169 def deltaparent(rev):
1173 """Obtain the revision that a revision is delta'd against.
1170 """Obtain the revision that a revision is delta'd against.
1174
1171
1175 TODO delta encoding is an implementation detail of storage and should
1172 TODO delta encoding is an implementation detail of storage and should
1176 not be exposed to the storage interface.
1173 not be exposed to the storage interface.
1177 """
1174 """
1178
1175
1179 def clone(tr, dest, **kwargs):
1176 def clone(tr, dest, **kwargs):
1180 """Clone this instance to another."""
1177 """Clone this instance to another."""
1181
1178
1182 def clearcaches(clear_persisted_data=False):
1179 def clearcaches(clear_persisted_data=False):
1183 """Clear any caches associated with this instance."""
1180 """Clear any caches associated with this instance."""
1184
1181
1185 def dirlog(d):
1182 def dirlog(d):
1186 """Obtain a manifest storage instance for a tree."""
1183 """Obtain a manifest storage instance for a tree."""
1187
1184
1188 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1185 def add(m, transaction, link, p1, p2, added, removed, readtree=None,
1189 match=None):
1186 match=None):
1190 """Add a revision to storage.
1187 """Add a revision to storage.
1191
1188
1192 ``m`` is an object conforming to ``imanifestdict``.
1189 ``m`` is an object conforming to ``imanifestdict``.
1193
1190
1194 ``link`` is the linkrev revision number.
1191 ``link`` is the linkrev revision number.
1195
1192
1196 ``p1`` and ``p2`` are the parent revision numbers.
1193 ``p1`` and ``p2`` are the parent revision numbers.
1197
1194
1198 ``added`` and ``removed`` are iterables of added and removed paths,
1195 ``added`` and ``removed`` are iterables of added and removed paths,
1199 respectively.
1196 respectively.
1200
1197
1201 ``readtree`` is a function that can be used to read the child tree(s)
1198 ``readtree`` is a function that can be used to read the child tree(s)
1202 when recursively writing the full tree structure when using
1199 when recursively writing the full tree structure when using
1203 treemanifests.
1200 treemanifests.
1204
1201
1205 ``match`` is a matcher that can be used to hint to storage that not all
1202 ``match`` is a matcher that can be used to hint to storage that not all
1206 paths must be inspected; this is an optimization and can be safely
1203 paths must be inspected; this is an optimization and can be safely
1207 ignored. Note that the storage must still be able to reproduce a full
1204 ignored. Note that the storage must still be able to reproduce a full
1208 manifest including files that did not match.
1205 manifest including files that did not match.
1209 """
1206 """
1210
1207
1211 class imanifestlog(interfaceutil.Interface):
1208 class imanifestlog(interfaceutil.Interface):
1212 """Interface representing a collection of manifest snapshots.
1209 """Interface representing a collection of manifest snapshots.
1213
1210
1214 Represents the root manifest in a repository.
1211 Represents the root manifest in a repository.
1215
1212
1216 Also serves as a means to access nested tree manifests and to cache
1213 Also serves as a means to access nested tree manifests and to cache
1217 tree manifests.
1214 tree manifests.
1218 """
1215 """
1219
1216
1220 def __getitem__(node):
1217 def __getitem__(node):
1221 """Obtain a manifest instance for a given binary node.
1218 """Obtain a manifest instance for a given binary node.
1222
1219
1223 Equivalent to calling ``self.get('', node)``.
1220 Equivalent to calling ``self.get('', node)``.
1224
1221
1225 The returned object conforms to the ``imanifestrevisionstored``
1222 The returned object conforms to the ``imanifestrevisionstored``
1226 interface.
1223 interface.
1227 """
1224 """
1228
1225
1229 def get(tree, node, verify=True):
1226 def get(tree, node, verify=True):
1230 """Retrieve the manifest instance for a given directory and binary node.
1227 """Retrieve the manifest instance for a given directory and binary node.
1231
1228
1232 ``node`` always refers to the node of the root manifest (which will be
1229 ``node`` always refers to the node of the root manifest (which will be
1233 the only manifest if flat manifests are being used).
1230 the only manifest if flat manifests are being used).
1234
1231
1235 If ``tree`` is the empty string, the root manifest is returned.
1232 If ``tree`` is the empty string, the root manifest is returned.
1236 Otherwise the manifest for the specified directory will be returned
1233 Otherwise the manifest for the specified directory will be returned
1237 (requires tree manifests).
1234 (requires tree manifests).
1238
1235
1239 If ``verify`` is True, ``LookupError`` is raised if the node is not
1236 If ``verify`` is True, ``LookupError`` is raised if the node is not
1240 known.
1237 known.
1241
1238
1242 The returned object conforms to the ``imanifestrevisionstored``
1239 The returned object conforms to the ``imanifestrevisionstored``
1243 interface.
1240 interface.
1244 """
1241 """
1245
1242
1246 def getstorage(tree):
1243 def getstorage(tree):
1247 """Retrieve an interface to storage for a particular tree.
1244 """Retrieve an interface to storage for a particular tree.
1248
1245
1249 If ``tree`` is the empty bytestring, storage for the root manifest will
1246 If ``tree`` is the empty bytestring, storage for the root manifest will
1250 be returned. Otherwise storage for a tree manifest is returned.
1247 be returned. Otherwise storage for a tree manifest is returned.
1251
1248
1252 TODO formalize interface for returned object.
1249 TODO formalize interface for returned object.
1253 """
1250 """
1254
1251
1255 def clearcaches():
1252 def clearcaches():
1256 """Clear caches associated with this collection."""
1253 """Clear caches associated with this collection."""
1257
1254
1258 def rev(node):
1255 def rev(node):
1259 """Obtain the revision number for a binary node.
1256 """Obtain the revision number for a binary node.
1260
1257
1261 Raises ``error.LookupError`` if the node is not known.
1258 Raises ``error.LookupError`` if the node is not known.
1262 """
1259 """
1263
1260
1264 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1261 class ilocalrepositoryfilestorage(interfaceutil.Interface):
1265 """Local repository sub-interface providing access to tracked file storage.
1262 """Local repository sub-interface providing access to tracked file storage.
1266
1263
1267 This interface defines how a repository accesses storage for a single
1264 This interface defines how a repository accesses storage for a single
1268 tracked file path.
1265 tracked file path.
1269 """
1266 """
1270
1267
1271 def file(f):
1268 def file(f):
1272 """Obtain a filelog for a tracked path.
1269 """Obtain a filelog for a tracked path.
1273
1270
1274 The returned type conforms to the ``ifilestorage`` interface.
1271 The returned type conforms to the ``ifilestorage`` interface.
1275 """
1272 """
1276
1273
1277 class ilocalrepositorymain(interfaceutil.Interface):
1274 class ilocalrepositorymain(interfaceutil.Interface):
1278 """Main interface for local repositories.
1275 """Main interface for local repositories.
1279
1276
1280 This currently captures the reality of things - not how things should be.
1277 This currently captures the reality of things - not how things should be.
1281 """
1278 """
1282
1279
1283 supportedformats = interfaceutil.Attribute(
1280 supportedformats = interfaceutil.Attribute(
1284 """Set of requirements that apply to stream clone.
1281 """Set of requirements that apply to stream clone.
1285
1282
1286 This is actually a class attribute and is shared among all instances.
1283 This is actually a class attribute and is shared among all instances.
1287 """)
1284 """)
1288
1285
1289 supported = interfaceutil.Attribute(
1286 supported = interfaceutil.Attribute(
1290 """Set of requirements that this repo is capable of opening.""")
1287 """Set of requirements that this repo is capable of opening.""")
1291
1288
1292 requirements = interfaceutil.Attribute(
1289 requirements = interfaceutil.Attribute(
1293 """Set of requirements this repo uses.""")
1290 """Set of requirements this repo uses.""")
1294
1291
1295 features = interfaceutil.Attribute(
1292 features = interfaceutil.Attribute(
1296 """Set of "features" this repository supports.
1293 """Set of "features" this repository supports.
1297
1294
1298 A "feature" is a loosely-defined term. It can refer to a feature
1295 A "feature" is a loosely-defined term. It can refer to a feature
1299 in the classical sense or can describe an implementation detail
1296 in the classical sense or can describe an implementation detail
1300 of the repository. For example, a ``readonly`` feature may denote
1297 of the repository. For example, a ``readonly`` feature may denote
1301 the repository as read-only. Or a ``revlogfilestore`` feature may
1298 the repository as read-only. Or a ``revlogfilestore`` feature may
1302 denote that the repository is using revlogs for file storage.
1299 denote that the repository is using revlogs for file storage.
1303
1300
1304 The intent of features is to provide a machine-queryable mechanism
1301 The intent of features is to provide a machine-queryable mechanism
1305 for repo consumers to test for various repository characteristics.
1302 for repo consumers to test for various repository characteristics.
1306
1303
1307 Features are similar to ``requirements``. The main difference is that
1304 Features are similar to ``requirements``. The main difference is that
1308 requirements are stored on-disk and represent requirements to open the
1305 requirements are stored on-disk and represent requirements to open the
1309 repository. Features describe run-time capabilities of the repository
1306 repository. Features describe run-time capabilities of the repository
1310 and tend to be more granular (and may be derived from requirements).
1307 and tend to be more granular (and may be derived from requirements).
1311 """)
1308 """)
1312
1309
1313 filtername = interfaceutil.Attribute(
1310 filtername = interfaceutil.Attribute(
1314 """Name of the repoview that is active on this repo.""")
1311 """Name of the repoview that is active on this repo.""")
1315
1312
1316 wvfs = interfaceutil.Attribute(
1313 wvfs = interfaceutil.Attribute(
1317 """VFS used to access the working directory.""")
1314 """VFS used to access the working directory.""")
1318
1315
1319 vfs = interfaceutil.Attribute(
1316 vfs = interfaceutil.Attribute(
1320 """VFS rooted at the .hg directory.
1317 """VFS rooted at the .hg directory.
1321
1318
1322 Used to access repository data not in the store.
1319 Used to access repository data not in the store.
1323 """)
1320 """)
1324
1321
1325 svfs = interfaceutil.Attribute(
1322 svfs = interfaceutil.Attribute(
1326 """VFS rooted at the store.
1323 """VFS rooted at the store.
1327
1324
1328 Used to access repository data in the store. Typically .hg/store.
1325 Used to access repository data in the store. Typically .hg/store.
1329 But can point elsewhere if the store is shared.
1326 But can point elsewhere if the store is shared.
1330 """)
1327 """)
1331
1328
1332 root = interfaceutil.Attribute(
1329 root = interfaceutil.Attribute(
1333 """Path to the root of the working directory.""")
1330 """Path to the root of the working directory.""")
1334
1331
1335 path = interfaceutil.Attribute(
1332 path = interfaceutil.Attribute(
1336 """Path to the .hg directory.""")
1333 """Path to the .hg directory.""")
1337
1334
1338 origroot = interfaceutil.Attribute(
1335 origroot = interfaceutil.Attribute(
1339 """The filesystem path that was used to construct the repo.""")
1336 """The filesystem path that was used to construct the repo.""")
1340
1337
1341 auditor = interfaceutil.Attribute(
1338 auditor = interfaceutil.Attribute(
1342 """A pathauditor for the working directory.
1339 """A pathauditor for the working directory.
1343
1340
1344 This checks if a path refers to a nested repository.
1341 This checks if a path refers to a nested repository.
1345
1342
1346 Operates on the filesystem.
1343 Operates on the filesystem.
1347 """)
1344 """)
1348
1345
1349 nofsauditor = interfaceutil.Attribute(
1346 nofsauditor = interfaceutil.Attribute(
1350 """A pathauditor for the working directory.
1347 """A pathauditor for the working directory.
1351
1348
1352 This is like ``auditor`` except it doesn't do filesystem checks.
1349 This is like ``auditor`` except it doesn't do filesystem checks.
1353 """)
1350 """)
1354
1351
1355 baseui = interfaceutil.Attribute(
1352 baseui = interfaceutil.Attribute(
1356 """Original ui instance passed into constructor.""")
1353 """Original ui instance passed into constructor.""")
1357
1354
1358 ui = interfaceutil.Attribute(
1355 ui = interfaceutil.Attribute(
1359 """Main ui instance for this instance.""")
1356 """Main ui instance for this instance.""")
1360
1357
1361 sharedpath = interfaceutil.Attribute(
1358 sharedpath = interfaceutil.Attribute(
1362 """Path to the .hg directory of the repo this repo was shared from.""")
1359 """Path to the .hg directory of the repo this repo was shared from.""")
1363
1360
1364 store = interfaceutil.Attribute(
1361 store = interfaceutil.Attribute(
1365 """A store instance.""")
1362 """A store instance.""")
1366
1363
1367 spath = interfaceutil.Attribute(
1364 spath = interfaceutil.Attribute(
1368 """Path to the store.""")
1365 """Path to the store.""")
1369
1366
1370 sjoin = interfaceutil.Attribute(
1367 sjoin = interfaceutil.Attribute(
1371 """Alias to self.store.join.""")
1368 """Alias to self.store.join.""")
1372
1369
1373 cachevfs = interfaceutil.Attribute(
1370 cachevfs = interfaceutil.Attribute(
1374 """A VFS used to access the cache directory.
1371 """A VFS used to access the cache directory.
1375
1372
1376 Typically .hg/cache.
1373 Typically .hg/cache.
1377 """)
1374 """)
1378
1375
1379 filteredrevcache = interfaceutil.Attribute(
1376 filteredrevcache = interfaceutil.Attribute(
1380 """Holds sets of revisions to be filtered.""")
1377 """Holds sets of revisions to be filtered.""")
1381
1378
1382 names = interfaceutil.Attribute(
1379 names = interfaceutil.Attribute(
1383 """A ``namespaces`` instance.""")
1380 """A ``namespaces`` instance.""")
1384
1381
1385 def close():
1382 def close():
1386 """Close the handle on this repository."""
1383 """Close the handle on this repository."""
1387
1384
1388 def peer():
1385 def peer():
1389 """Obtain an object conforming to the ``peer`` interface."""
1386 """Obtain an object conforming to the ``peer`` interface."""
1390
1387
1391 def unfiltered():
1388 def unfiltered():
1392 """Obtain an unfiltered/raw view of this repo."""
1389 """Obtain an unfiltered/raw view of this repo."""
1393
1390
1394 def filtered(name, visibilityexceptions=None):
1391 def filtered(name, visibilityexceptions=None):
1395 """Obtain a named view of this repository."""
1392 """Obtain a named view of this repository."""
1396
1393
1397 obsstore = interfaceutil.Attribute(
1394 obsstore = interfaceutil.Attribute(
1398 """A store of obsolescence data.""")
1395 """A store of obsolescence data.""")
1399
1396
1400 changelog = interfaceutil.Attribute(
1397 changelog = interfaceutil.Attribute(
1401 """A handle on the changelog revlog.""")
1398 """A handle on the changelog revlog.""")
1402
1399
1403 manifestlog = interfaceutil.Attribute(
1400 manifestlog = interfaceutil.Attribute(
1404 """An instance conforming to the ``imanifestlog`` interface.
1401 """An instance conforming to the ``imanifestlog`` interface.
1405
1402
1406 Provides access to manifests for the repository.
1403 Provides access to manifests for the repository.
1407 """)
1404 """)
1408
1405
1409 dirstate = interfaceutil.Attribute(
1406 dirstate = interfaceutil.Attribute(
1410 """Working directory state.""")
1407 """Working directory state.""")
1411
1408
1412 narrowpats = interfaceutil.Attribute(
1409 narrowpats = interfaceutil.Attribute(
1413 """Matcher patterns for this repository's narrowspec.""")
1410 """Matcher patterns for this repository's narrowspec.""")
1414
1411
1415 def narrowmatch():
1412 def narrowmatch():
1416 """Obtain a matcher for the narrowspec."""
1413 """Obtain a matcher for the narrowspec."""
1417
1414
1418 def setnarrowpats(newincludes, newexcludes):
1415 def setnarrowpats(newincludes, newexcludes):
1419 """Define the narrowspec for this repository."""
1416 """Define the narrowspec for this repository."""
1420
1417
1421 def __getitem__(changeid):
1418 def __getitem__(changeid):
1422 """Try to resolve a changectx."""
1419 """Try to resolve a changectx."""
1423
1420
1424 def __contains__(changeid):
1421 def __contains__(changeid):
1425 """Whether a changeset exists."""
1422 """Whether a changeset exists."""
1426
1423
1427 def __nonzero__():
1424 def __nonzero__():
1428 """Always returns True."""
1425 """Always returns True."""
1429 return True
1426 return True
1430
1427
1431 __bool__ = __nonzero__
1428 __bool__ = __nonzero__
1432
1429
1433 def __len__():
1430 def __len__():
1434 """Returns the number of changesets in the repo."""
1431 """Returns the number of changesets in the repo."""
1435
1432
1436 def __iter__():
1433 def __iter__():
1437 """Iterate over revisions in the changelog."""
1434 """Iterate over revisions in the changelog."""
1438
1435
1439 def revs(expr, *args):
1436 def revs(expr, *args):
1440 """Evaluate a revset.
1437 """Evaluate a revset.
1441
1438
1442 Emits revisions.
1439 Emits revisions.
1443 """
1440 """
1444
1441
1445 def set(expr, *args):
1442 def set(expr, *args):
1446 """Evaluate a revset.
1443 """Evaluate a revset.
1447
1444
1448 Emits changectx instances.
1445 Emits changectx instances.
1449 """
1446 """
1450
1447
1451 def anyrevs(specs, user=False, localalias=None):
1448 def anyrevs(specs, user=False, localalias=None):
1452 """Find revisions matching one of the given revsets."""
1449 """Find revisions matching one of the given revsets."""
1453
1450
1454 def url():
1451 def url():
1455 """Returns a string representing the location of this repo."""
1452 """Returns a string representing the location of this repo."""
1456
1453
1457 def hook(name, throw=False, **args):
1454 def hook(name, throw=False, **args):
1458 """Call a hook."""
1455 """Call a hook."""
1459
1456
1460 def tags():
1457 def tags():
1461 """Return a mapping of tag to node."""
1458 """Return a mapping of tag to node."""
1462
1459
1463 def tagtype(tagname):
1460 def tagtype(tagname):
1464 """Return the type of a given tag."""
1461 """Return the type of a given tag."""
1465
1462
1466 def tagslist():
1463 def tagslist():
1467 """Return a list of tags ordered by revision."""
1464 """Return a list of tags ordered by revision."""
1468
1465
1469 def nodetags(node):
1466 def nodetags(node):
1470 """Return the tags associated with a node."""
1467 """Return the tags associated with a node."""
1471
1468
1472 def nodebookmarks(node):
1469 def nodebookmarks(node):
1473 """Return the list of bookmarks pointing to the specified node."""
1470 """Return the list of bookmarks pointing to the specified node."""
1474
1471
1475 def branchmap():
1472 def branchmap():
1476 """Return a mapping of branch to heads in that branch."""
1473 """Return a mapping of branch to heads in that branch."""
1477
1474
1478 def revbranchcache():
1475 def revbranchcache():
1479 pass
1476 pass
1480
1477
1481 def branchtip(branchtip, ignoremissing=False):
1478 def branchtip(branchtip, ignoremissing=False):
1482 """Return the tip node for a given branch."""
1479 """Return the tip node for a given branch."""
1483
1480
1484 def lookup(key):
1481 def lookup(key):
1485 """Resolve the node for a revision."""
1482 """Resolve the node for a revision."""
1486
1483
1487 def lookupbranch(key):
1484 def lookupbranch(key):
1488 """Look up the branch name of the given revision or branch name."""
1485 """Look up the branch name of the given revision or branch name."""
1489
1486
1490 def known(nodes):
1487 def known(nodes):
1491 """Determine whether a series of nodes is known.
1488 """Determine whether a series of nodes is known.
1492
1489
1493 Returns a list of bools.
1490 Returns a list of bools.
1494 """
1491 """
1495
1492
1496 def local():
1493 def local():
1497 """Whether the repository is local."""
1494 """Whether the repository is local."""
1498 return True
1495 return True
1499
1496
1500 def publishing():
1497 def publishing():
1501 """Whether the repository is a publishing repository."""
1498 """Whether the repository is a publishing repository."""
1502
1499
1503 def cancopy():
1500 def cancopy():
1504 pass
1501 pass
1505
1502
1506 def shared():
1503 def shared():
1507 """The type of shared repository or None."""
1504 """The type of shared repository or None."""
1508
1505
1509 def wjoin(f, *insidef):
1506 def wjoin(f, *insidef):
1510 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1507 """Calls self.vfs.reljoin(self.root, f, *insidef)"""
1511
1508
1512 def setparents(p1, p2):
1509 def setparents(p1, p2):
1513 """Set the parent nodes of the working directory."""
1510 """Set the parent nodes of the working directory."""
1514
1511
1515 def filectx(path, changeid=None, fileid=None):
1512 def filectx(path, changeid=None, fileid=None):
1516 """Obtain a filectx for the given file revision."""
1513 """Obtain a filectx for the given file revision."""
1517
1514
1518 def getcwd():
1515 def getcwd():
1519 """Obtain the current working directory from the dirstate."""
1516 """Obtain the current working directory from the dirstate."""
1520
1517
1521 def pathto(f, cwd=None):
1518 def pathto(f, cwd=None):
1522 """Obtain the relative path to a file."""
1519 """Obtain the relative path to a file."""
1523
1520
1524 def adddatafilter(name, fltr):
1521 def adddatafilter(name, fltr):
1525 pass
1522 pass
1526
1523
1527 def wread(filename):
1524 def wread(filename):
1528 """Read a file from wvfs, using data filters."""
1525 """Read a file from wvfs, using data filters."""
1529
1526
1530 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1527 def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
1531 """Write data to a file in the wvfs, using data filters."""
1528 """Write data to a file in the wvfs, using data filters."""
1532
1529
1533 def wwritedata(filename, data):
1530 def wwritedata(filename, data):
1534 """Resolve data for writing to the wvfs, using data filters."""
1531 """Resolve data for writing to the wvfs, using data filters."""
1535
1532
1536 def currenttransaction():
1533 def currenttransaction():
1537 """Obtain the current transaction instance or None."""
1534 """Obtain the current transaction instance or None."""
1538
1535
1539 def transaction(desc, report=None):
1536 def transaction(desc, report=None):
1540 """Open a new transaction to write to the repository."""
1537 """Open a new transaction to write to the repository."""
1541
1538
1542 def undofiles():
1539 def undofiles():
1543 """Returns a list of (vfs, path) for files to undo transactions."""
1540 """Returns a list of (vfs, path) for files to undo transactions."""
1544
1541
1545 def recover():
1542 def recover():
1546 """Roll back an interrupted transaction."""
1543 """Roll back an interrupted transaction."""
1547
1544
1548 def rollback(dryrun=False, force=False):
1545 def rollback(dryrun=False, force=False):
1549 """Undo the last transaction.
1546 """Undo the last transaction.
1550
1547
1551 DANGEROUS.
1548 DANGEROUS.
1552 """
1549 """
1553
1550
1554 def updatecaches(tr=None, full=False):
1551 def updatecaches(tr=None, full=False):
1555 """Warm repo caches."""
1552 """Warm repo caches."""
1556
1553
1557 def invalidatecaches():
1554 def invalidatecaches():
1558 """Invalidate cached data due to the repository mutating."""
1555 """Invalidate cached data due to the repository mutating."""
1559
1556
1560 def invalidatevolatilesets():
1557 def invalidatevolatilesets():
1561 pass
1558 pass
1562
1559
1563 def invalidatedirstate():
1560 def invalidatedirstate():
1564 """Invalidate the dirstate."""
1561 """Invalidate the dirstate."""
1565
1562
1566 def invalidate(clearfilecache=False):
1563 def invalidate(clearfilecache=False):
1567 pass
1564 pass
1568
1565
1569 def invalidateall():
1566 def invalidateall():
1570 pass
1567 pass
1571
1568
1572 def lock(wait=True):
1569 def lock(wait=True):
1573 """Lock the repository store and return a lock instance."""
1570 """Lock the repository store and return a lock instance."""
1574
1571
1575 def wlock(wait=True):
1572 def wlock(wait=True):
1576 """Lock the non-store parts of the repository."""
1573 """Lock the non-store parts of the repository."""
1577
1574
1578 def currentwlock():
1575 def currentwlock():
1579 """Return the wlock if it's held or None."""
1576 """Return the wlock if it's held or None."""
1580
1577
1581 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1578 def checkcommitpatterns(wctx, vdirs, match, status, fail):
1582 pass
1579 pass
1583
1580
1584 def commit(text='', user=None, date=None, match=None, force=False,
1581 def commit(text='', user=None, date=None, match=None, force=False,
1585 editor=False, extra=None):
1582 editor=False, extra=None):
1586 """Add a new revision to the repository."""
1583 """Add a new revision to the repository."""
1587
1584
1588 def commitctx(ctx, error=False):
1585 def commitctx(ctx, error=False):
1589 """Commit a commitctx instance to the repository."""
1586 """Commit a commitctx instance to the repository."""
1590
1587
1591 def destroying():
1588 def destroying():
1592 """Inform the repository that nodes are about to be destroyed."""
1589 """Inform the repository that nodes are about to be destroyed."""
1593
1590
1594 def destroyed():
1591 def destroyed():
1595 """Inform the repository that nodes have been destroyed."""
1592 """Inform the repository that nodes have been destroyed."""
1596
1593
1597 def status(node1='.', node2=None, match=None, ignored=False,
1594 def status(node1='.', node2=None, match=None, ignored=False,
1598 clean=False, unknown=False, listsubrepos=False):
1595 clean=False, unknown=False, listsubrepos=False):
1599 """Convenience method to call repo[x].status()."""
1596 """Convenience method to call repo[x].status()."""
1600
1597
1601 def addpostdsstatus(ps):
1598 def addpostdsstatus(ps):
1602 pass
1599 pass
1603
1600
1604 def postdsstatus():
1601 def postdsstatus():
1605 pass
1602 pass
1606
1603
1607 def clearpostdsstatus():
1604 def clearpostdsstatus():
1608 pass
1605 pass
1609
1606
1610 def heads(start=None):
1607 def heads(start=None):
1611 """Obtain list of nodes that are DAG heads."""
1608 """Obtain list of nodes that are DAG heads."""
1612
1609
1613 def branchheads(branch=None, start=None, closed=False):
1610 def branchheads(branch=None, start=None, closed=False):
1614 pass
1611 pass
1615
1612
1616 def branches(nodes):
1613 def branches(nodes):
1617 pass
1614 pass
1618
1615
1619 def between(pairs):
1616 def between(pairs):
1620 pass
1617 pass
1621
1618
1622 def checkpush(pushop):
1619 def checkpush(pushop):
1623 pass
1620 pass
1624
1621
1625 prepushoutgoinghooks = interfaceutil.Attribute(
1622 prepushoutgoinghooks = interfaceutil.Attribute(
1626 """util.hooks instance.""")
1623 """util.hooks instance.""")
1627
1624
1628 def pushkey(namespace, key, old, new):
1625 def pushkey(namespace, key, old, new):
1629 pass
1626 pass
1630
1627
1631 def listkeys(namespace):
1628 def listkeys(namespace):
1632 pass
1629 pass
1633
1630
1634 def debugwireargs(one, two, three=None, four=None, five=None):
1631 def debugwireargs(one, two, three=None, four=None, five=None):
1635 pass
1632 pass
1636
1633
1637 def savecommitmessage(text):
1634 def savecommitmessage(text):
1638 pass
1635 pass
1639
1636
1640 class completelocalrepository(ilocalrepositorymain,
1637 class completelocalrepository(ilocalrepositorymain,
1641 ilocalrepositoryfilestorage):
1638 ilocalrepositoryfilestorage):
1642 """Complete interface for a local repository."""
1639 """Complete interface for a local repository."""
@@ -1,735 +1,735 b''
1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
1 # simplestorerepo.py - Extension that swaps in alternate repository storage.
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 # To use this with the test suite:
8 # To use this with the test suite:
9 #
9 #
10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
10 # $ HGREPOFEATURES="simplestore" ./run-tests.py \
11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
11 # --extra-config-opt extensions.simplestore=`pwd`/simplestorerepo.py
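# (Illustrative equivalent, assuming standard extension loading: the
#  ``extensions.simplestore`` config option above corresponds to an hgrc
#  entry of the form
#
#      [extensions]
#      simplestore = /path/to/simplestorerepo.py
#
#  where ``/path/to/simplestorerepo.py`` is a placeholder for wherever this
#  file lives.)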
12
12
13 from __future__ import absolute_import
13 from __future__ import absolute_import
14
14
15 import stat
15 import stat
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18 from mercurial.node import (
18 from mercurial.node import (
19 bin,
19 bin,
20 hex,
20 hex,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 )
23 )
24 from mercurial.thirdparty import (
24 from mercurial.thirdparty import (
25 attr,
25 attr,
26 cbor,
26 cbor,
27 )
27 )
28 from mercurial import (
28 from mercurial import (
29 ancestor,
29 ancestor,
30 bundlerepo,
30 bundlerepo,
31 error,
31 error,
32 extensions,
32 extensions,
33 localrepo,
33 localrepo,
34 mdiff,
34 mdiff,
35 pycompat,
35 pycompat,
36 repository,
36 repository,
37 revlog,
37 revlog,
38 store,
38 store,
39 verify,
39 verify,
40 )
40 )
41 from mercurial.utils import (
41 from mercurial.utils import (
42 interfaceutil,
42 interfaceutil,
43 )
43 )
44
44
45 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
45 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
46 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
46 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
47 # be specifying the version(s) of Mercurial they are tested with, or
47 # be specifying the version(s) of Mercurial they are tested with, or
48 # leave the attribute unspecified.
48 # leave the attribute unspecified.
49 testedwith = 'ships-with-hg-core'
49 testedwith = 'ships-with-hg-core'
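# (Illustrative only: a non-mainline extension would instead declare the
#  Mercurial versions it was tested with, e.g. ``testedwith = '4.7.1 4.8'``,
#  or leave ``testedwith`` unset.)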
50
50
51 REQUIREMENT = 'testonly-simplestore'
51 REQUIREMENT = 'testonly-simplestore'
52
52
53 def validatenode(node):
53 def validatenode(node):
54 if isinstance(node, int):
54 if isinstance(node, int):
55 raise ValueError('expected node; got int')
55 raise ValueError('expected node; got int')
56
56
57 if len(node) != 20:
57 if len(node) != 20:
58 raise ValueError('expected 20 byte node')
58 raise ValueError('expected 20 byte node')
59
59
60 def validaterev(rev):
60 def validaterev(rev):
61 if not isinstance(rev, int):
61 if not isinstance(rev, int):
62 raise ValueError('expected int')
62 raise ValueError('expected int')
63
63
64 class simplestoreerror(error.StorageError):
64 class simplestoreerror(error.StorageError):
65 pass
65 pass
66
66
67 @interfaceutil.implementer(repository.irevisiondelta)
67 @interfaceutil.implementer(repository.irevisiondelta)
68 @attr.s(slots=True, frozen=True)
68 @attr.s(slots=True, frozen=True)
69 class simplestorerevisiondelta(object):
69 class simplestorerevisiondelta(object):
70 node = attr.ib()
70 node = attr.ib()
71 p1node = attr.ib()
71 p1node = attr.ib()
72 p2node = attr.ib()
72 p2node = attr.ib()
73 basenode = attr.ib()
73 basenode = attr.ib()
74 linknode = attr.ib()
74 linknode = attr.ib()
75 flags = attr.ib()
75 flags = attr.ib()
76 baserevisionsize = attr.ib()
76 baserevisionsize = attr.ib()
77 revision = attr.ib()
77 revision = attr.ib()
78 delta = attr.ib()
78 delta = attr.ib()
79
79
80 @interfaceutil.implementer(repository.ifilestorage)
80 @interfaceutil.implementer(repository.ifilestorage)
81 class filestorage(object):
81 class filestorage(object):
82 """Implements storage for a tracked path.
82 """Implements storage for a tracked path.
83
83
84 Data is stored in the VFS in a directory corresponding to the tracked
84 Data is stored in the VFS in a directory corresponding to the tracked
85 path.
85 path.
86
86
87 Index data is stored in an ``index`` file using CBOR.
87 Index data is stored in an ``index`` file using CBOR.
88
88
89 Fulltext data is stored in files having names of the node.
89 Fulltext data is stored in files having names of the node.
90 """
90 """
91
91
92 def __init__(self, svfs, path):
92 def __init__(self, svfs, path):
93 self._svfs = svfs
93 self._svfs = svfs
94 self._path = path
94 self._path = path
95
95
96 self._storepath = b'/'.join([b'data', path])
96 self._storepath = b'/'.join([b'data', path])
97 self._indexpath = b'/'.join([self._storepath, b'index'])
97 self._indexpath = b'/'.join([self._storepath, b'index'])
98
98
99 indexdata = self._svfs.tryread(self._indexpath)
99 indexdata = self._svfs.tryread(self._indexpath)
100 if indexdata:
100 if indexdata:
101 indexdata = cbor.loads(indexdata)
101 indexdata = cbor.loads(indexdata)
102
102
103 self._indexdata = indexdata or []
103 self._indexdata = indexdata or []
104 self._indexbynode = {}
104 self._indexbynode = {}
105 self._indexbyrev = {}
105 self._indexbyrev = {}
106 self.index = []
106 self._index = []
107 self._refreshindex()
107 self._refreshindex()
108
108
109 # This is used by changegroup code :/
109 # This is used by changegroup code :/
110 self._generaldelta = True
110 self._generaldelta = True
111
111
112 def _refreshindex(self):
112 def _refreshindex(self):
113 self._indexbynode.clear()
113 self._indexbynode.clear()
114 self._indexbyrev.clear()
114 self._indexbyrev.clear()
115 self.index = []
115 self._index = []
116
116
117 for i, entry in enumerate(self._indexdata):
117 for i, entry in enumerate(self._indexdata):
118 self._indexbynode[entry[b'node']] = entry
118 self._indexbynode[entry[b'node']] = entry
119 self._indexbyrev[i] = entry
119 self._indexbyrev[i] = entry
120
120
121 self._indexbynode[nullid] = {
121 self._indexbynode[nullid] = {
122 b'node': nullid,
122 b'node': nullid,
123 b'p1': nullid,
123 b'p1': nullid,
124 b'p2': nullid,
124 b'p2': nullid,
125 b'linkrev': nullrev,
125 b'linkrev': nullrev,
126 b'flags': 0,
126 b'flags': 0,
127 }
127 }
128
128
129 self._indexbyrev[nullrev] = {
129 self._indexbyrev[nullrev] = {
130 b'node': nullid,
130 b'node': nullid,
131 b'p1': nullid,
131 b'p1': nullid,
132 b'p2': nullid,
132 b'p2': nullid,
133 b'linkrev': nullrev,
133 b'linkrev': nullrev,
134 b'flags': 0,
134 b'flags': 0,
135 }
135 }
136
136
137 for i, entry in enumerate(self._indexdata):
137 for i, entry in enumerate(self._indexdata):
138 p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))
138 p1rev, p2rev = self.parentrevs(self.rev(entry[b'node']))
139
139
140 # start, length, rawsize, chainbase, linkrev, p1, p2, node
140 # start, length, rawsize, chainbase, linkrev, p1, p2, node
141 self.index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
141 self._index.append((0, 0, 0, -1, entry[b'linkrev'], p1rev, p2rev,
142 entry[b'node']))
142 entry[b'node']))
143
143
144 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
144 self._index.append((0, 0, 0, -1, -1, -1, -1, nullid))
145
145
146 def __len__(self):
146 def __len__(self):
147 return len(self._indexdata)
147 return len(self._indexdata)
148
148
149 def __iter__(self):
149 def __iter__(self):
150 return iter(range(len(self)))
150 return iter(range(len(self)))
151
151
152 def revs(self, start=0, stop=None):
152 def revs(self, start=0, stop=None):
153 step = 1
153 step = 1
154 if stop is not None:
154 if stop is not None:
155 if start > stop:
155 if start > stop:
156 step = -1
156 step = -1
157
157
158 stop += step
158 stop += step
159 else:
159 else:
160 stop = len(self)
160 stop = len(self)
161
161
162 return range(start, stop, step)
162 return range(start, stop, step)
163
163
164 def parents(self, node):
164 def parents(self, node):
165 validatenode(node)
165 validatenode(node)
166
166
167 if node not in self._indexbynode:
167 if node not in self._indexbynode:
168 raise KeyError('unknown node')
168 raise KeyError('unknown node')
169
169
170 entry = self._indexbynode[node]
170 entry = self._indexbynode[node]
171
171
172 return entry[b'p1'], entry[b'p2']
172 return entry[b'p1'], entry[b'p2']
173
173
174 def parentrevs(self, rev):
174 def parentrevs(self, rev):
175 p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
175 p1, p2 = self.parents(self._indexbyrev[rev][b'node'])
176 return self.rev(p1), self.rev(p2)
176 return self.rev(p1), self.rev(p2)
177
177
178 def rev(self, node):
178 def rev(self, node):
179 validatenode(node)
179 validatenode(node)
180
180
181 try:
181 try:
182 self._indexbynode[node]
182 self._indexbynode[node]
183 except KeyError:
183 except KeyError:
184 raise error.LookupError(node, self._indexpath, _('no node'))
184 raise error.LookupError(node, self._indexpath, _('no node'))
185
185
186 for rev, entry in self._indexbyrev.items():
186 for rev, entry in self._indexbyrev.items():
187 if entry[b'node'] == node:
187 if entry[b'node'] == node:
188 return rev
188 return rev
189
189
190 raise error.ProgrammingError('this should not occur')
190 raise error.ProgrammingError('this should not occur')
191
191
192 def node(self, rev):
192 def node(self, rev):
193 validaterev(rev)
193 validaterev(rev)
194
194
195 return self._indexbyrev[rev][b'node']
195 return self._indexbyrev[rev][b'node']
196
196
197 def lookup(self, node):
197 def lookup(self, node):
198 if isinstance(node, int):
198 if isinstance(node, int):
199 return self.node(node)
199 return self.node(node)
200
200
201 if len(node) == 20:
201 if len(node) == 20:
202 self.rev(node)
202 self.rev(node)
203 return node
203 return node
204
204
205 try:
205 try:
206 rev = int(node)
206 rev = int(node)
207 if '%d' % rev != node:
207 if '%d' % rev != node:
208 raise ValueError
208 raise ValueError
209
209
210 if rev < 0:
210 if rev < 0:
211 rev = len(self) + rev
211 rev = len(self) + rev
212 if rev < 0 or rev >= len(self):
212 if rev < 0 or rev >= len(self):
213 raise ValueError
213 raise ValueError
214
214
215 return self.node(rev)
215 return self.node(rev)
216 except (ValueError, OverflowError):
216 except (ValueError, OverflowError):
217 pass
217 pass
218
218
219 if len(node) == 40:
219 if len(node) == 40:
220 try:
220 try:
221 rawnode = bin(node)
221 rawnode = bin(node)
222 self.rev(rawnode)
222 self.rev(rawnode)
223 return rawnode
223 return rawnode
224 except TypeError:
224 except TypeError:
225 pass
225 pass
226
226
227 raise error.LookupError(node, self._path, _('invalid lookup input'))
227 raise error.LookupError(node, self._path, _('invalid lookup input'))
228
228
229 def linkrev(self, rev):
229 def linkrev(self, rev):
230 validaterev(rev)
230 validaterev(rev)
231
231
232 return self._indexbyrev[rev][b'linkrev']
232 return self._indexbyrev[rev][b'linkrev']
233
233
234 def flags(self, rev):
234 def flags(self, rev):
235 validaterev(rev)
235 validaterev(rev)
236
236
237 return self._indexbyrev[rev][b'flags']
237 return self._indexbyrev[rev][b'flags']
238
238
239 def deltaparent(self, rev):
239 def deltaparent(self, rev):
240 validaterev(rev)
240 validaterev(rev)
241
241
242 p1node = self.parents(self.node(rev))[0]
242 p1node = self.parents(self.node(rev))[0]
243 return self.rev(p1node)
243 return self.rev(p1node)
244
244
245 def _candelta(self, baserev, rev):
245 def _candelta(self, baserev, rev):
246 validaterev(baserev)
246 validaterev(baserev)
247 validaterev(rev)
247 validaterev(rev)
248
248
249 if ((self.flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
249 if ((self.flags(baserev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)
250 or (self.flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
250 or (self.flags(rev) & revlog.REVIDX_RAWTEXT_CHANGING_FLAGS)):
251 return False
251 return False
252
252
253 return True
253 return True
254
254
255 def rawsize(self, rev):
255 def rawsize(self, rev):
256 validaterev(rev)
256 validaterev(rev)
257 node = self.node(rev)
257 node = self.node(rev)
258 return len(self.revision(node, raw=True))
258 return len(self.revision(node, raw=True))
259
259
260 def _processflags(self, text, flags, operation, raw=False):
260 def _processflags(self, text, flags, operation, raw=False):
261 if flags == 0:
261 if flags == 0:
262 return text, True
262 return text, True
263
263
264 if flags & ~revlog.REVIDX_KNOWN_FLAGS:
264 if flags & ~revlog.REVIDX_KNOWN_FLAGS:
265 raise simplestoreerror(_("incompatible revision flag '%#x'") %
265 raise simplestoreerror(_("incompatible revision flag '%#x'") %
266 (flags & ~revlog.REVIDX_KNOWN_FLAGS))
266 (flags & ~revlog.REVIDX_KNOWN_FLAGS))
267
267
268 validatehash = True
268 validatehash = True
269 # Depending on the operation (read or write), the order might be
269 # Depending on the operation (read or write), the order might be
270 # reversed due to non-commutative transforms.
270 # reversed due to non-commutative transforms.
271 orderedflags = revlog.REVIDX_FLAGS_ORDER
271 orderedflags = revlog.REVIDX_FLAGS_ORDER
272 if operation == 'write':
272 if operation == 'write':
273 orderedflags = reversed(orderedflags)
273 orderedflags = reversed(orderedflags)
274
274
275 for flag in orderedflags:
275 for flag in orderedflags:
276 # If a flagprocessor has been registered for a known flag, apply the
276 # If a flagprocessor has been registered for a known flag, apply the
277 # related operation transform and update result tuple.
277 # related operation transform and update result tuple.
278 if flag & flags:
278 if flag & flags:
279 vhash = True
279 vhash = True
280
280
281 if flag not in revlog._flagprocessors:
281 if flag not in revlog._flagprocessors:
282 message = _("missing processor for flag '%#x'") % (flag)
282 message = _("missing processor for flag '%#x'") % (flag)
283 raise simplestoreerror(message)
283 raise simplestoreerror(message)
284
284
285 processor = revlog._flagprocessors[flag]
285 processor = revlog._flagprocessors[flag]
286 if processor is not None:
286 if processor is not None:
287 readtransform, writetransform, rawtransform = processor
287 readtransform, writetransform, rawtransform = processor
288
288
289 if raw:
289 if raw:
290 vhash = rawtransform(self, text)
290 vhash = rawtransform(self, text)
291 elif operation == 'read':
291 elif operation == 'read':
292 text, vhash = readtransform(self, text)
292 text, vhash = readtransform(self, text)
293 else: # write operation
293 else: # write operation
294 text, vhash = writetransform(self, text)
294 text, vhash = writetransform(self, text)
295 validatehash = validatehash and vhash
295 validatehash = validatehash and vhash
296
296
297 return text, validatehash
297 return text, validatehash
298
298
299 def checkhash(self, text, node, p1=None, p2=None, rev=None):
299 def checkhash(self, text, node, p1=None, p2=None, rev=None):
300 if p1 is None and p2 is None:
300 if p1 is None and p2 is None:
301 p1, p2 = self.parents(node)
301 p1, p2 = self.parents(node)
302 if node != revlog.hash(text, p1, p2):
302 if node != revlog.hash(text, p1, p2):
303 raise simplestoreerror(_("integrity check failed on %s") %
303 raise simplestoreerror(_("integrity check failed on %s") %
304 self._path)
304 self._path)
305
305
306 def revision(self, node, raw=False):
306 def revision(self, node, raw=False):
307 validatenode(node)
307 validatenode(node)
308
308
309 if node == nullid:
309 if node == nullid:
310 return b''
310 return b''
311
311
312 rev = self.rev(node)
312 rev = self.rev(node)
313 flags = self.flags(rev)
313 flags = self.flags(rev)
314
314
315 path = b'/'.join([self._storepath, hex(node)])
315 path = b'/'.join([self._storepath, hex(node)])
316 rawtext = self._svfs.read(path)
316 rawtext = self._svfs.read(path)
317
317
318 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
318 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
319 if validatehash:
319 if validatehash:
320 self.checkhash(text, node, rev=rev)
320 self.checkhash(text, node, rev=rev)
321
321
322 return text
322 return text
323
323
324 def read(self, node):
324 def read(self, node):
325 validatenode(node)
325 validatenode(node)
326
326
327 revision = self.revision(node)
327 revision = self.revision(node)
328
328
329 if not revision.startswith(b'\1\n'):
329 if not revision.startswith(b'\1\n'):
330 return revision
330 return revision
331
331
332 start = revision.index(b'\1\n', 2)
332 start = revision.index(b'\1\n', 2)
333 return revision[start + 2:]
333 return revision[start + 2:]
334
334
335 def renamed(self, node):
335 def renamed(self, node):
336 validatenode(node)
336 validatenode(node)
337
337
338 if self.parents(node)[0] != nullid:
338 if self.parents(node)[0] != nullid:
339 return False
339 return False
340
340
341 fulltext = self.revision(node)
341 fulltext = self.revision(node)
342 m = revlog.parsemeta(fulltext)[0]
342 m = revlog.parsemeta(fulltext)[0]
343
343
344 if m and 'copy' in m:
344 if m and 'copy' in m:
345 return m['copy'], bin(m['copyrev'])
345 return m['copy'], bin(m['copyrev'])
346
346
347 return False
347 return False
348
348
349 def cmp(self, node, text):
349 def cmp(self, node, text):
350 validatenode(node)
350 validatenode(node)
351
351
352 t = text
352 t = text
353
353
354 if text.startswith(b'\1\n'):
354 if text.startswith(b'\1\n'):
355 t = b'\1\n\1\n' + text
355 t = b'\1\n\1\n' + text
356
356
357 p1, p2 = self.parents(node)
357 p1, p2 = self.parents(node)
358
358
359 if revlog.hash(t, p1, p2) == node:
359 if revlog.hash(t, p1, p2) == node:
360 return False
360 return False
361
361
362 if self.iscensored(self.rev(node)):
362 if self.iscensored(self.rev(node)):
363 return text != b''
363 return text != b''
364
364
365 if self.renamed(node):
365 if self.renamed(node):
366 t2 = self.read(node)
366 t2 = self.read(node)
367 return t2 != text
367 return t2 != text
368
368
369 return True
369 return True
370
370
371 def size(self, rev):
371 def size(self, rev):
372 validaterev(rev)
372 validaterev(rev)
373
373
374 node = self._indexbyrev[rev][b'node']
374 node = self._indexbyrev[rev][b'node']
375
375
376 if self.renamed(node):
376 if self.renamed(node):
377 return len(self.read(node))
377 return len(self.read(node))
378
378
379 if self.iscensored(rev):
379 if self.iscensored(rev):
380 return 0
380 return 0
381
381
382 return len(self.revision(node))
382 return len(self.revision(node))
383
383
384 def iscensored(self, rev):
384 def iscensored(self, rev):
385 validaterev(rev)
385 validaterev(rev)
386
386
387 return self.flags(rev) & revlog.REVIDX_ISCENSORED
387 return self.flags(rev) & revlog.REVIDX_ISCENSORED
388
388
389 def commonancestorsheads(self, a, b):
389 def commonancestorsheads(self, a, b):
390 validatenode(a)
390 validatenode(a)
391 validatenode(b)
391 validatenode(b)
392
392
393 a = self.rev(a)
393 a = self.rev(a)
394 b = self.rev(b)
394 b = self.rev(b)
395
395
396 ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
396 ancestors = ancestor.commonancestorsheads(self.parentrevs, a, b)
397 return pycompat.maplist(self.node, ancestors)
397 return pycompat.maplist(self.node, ancestors)
398
398
399 def descendants(self, revs):
399 def descendants(self, revs):
400 # This is a copy of revlog.descendants()
400 # This is a copy of revlog.descendants()
401 first = min(revs)
401 first = min(revs)
402 if first == nullrev:
402 if first == nullrev:
403 for i in self:
403 for i in self:
404 yield i
404 yield i
405 return
405 return
406
406
407 seen = set(revs)
407 seen = set(revs)
408 for i in self.revs(start=first + 1):
408 for i in self.revs(start=first + 1):
409 for x in self.parentrevs(i):
409 for x in self.parentrevs(i):
410 if x != nullrev and x in seen:
410 if x != nullrev and x in seen:
411 seen.add(i)
411 seen.add(i)
412 yield i
412 yield i
413 break
413 break
414
414
415 # Required by verify.
415 # Required by verify.
416 def files(self):
416 def files(self):
417 entries = self._svfs.listdir(self._storepath)
417 entries = self._svfs.listdir(self._storepath)
418
418
419 # Strip out undo.backup.* files created as part of transaction
419 # Strip out undo.backup.* files created as part of transaction
420 # recording.
420 # recording.
421 entries = [f for f in entries if not f.startswith('undo.backup.')]
421 entries = [f for f in entries if not f.startswith('undo.backup.')]
422
422
423 return [b'/'.join((self._storepath, f)) for f in entries]
423 return [b'/'.join((self._storepath, f)) for f in entries]
424
424
425 def add(self, text, meta, transaction, linkrev, p1, p2):
425 def add(self, text, meta, transaction, linkrev, p1, p2):
426 if meta or text.startswith(b'\1\n'):
426 if meta or text.startswith(b'\1\n'):
427 text = revlog.packmeta(meta, text)
427 text = revlog.packmeta(meta, text)
428
428
429 return self.addrevision(text, transaction, linkrev, p1, p2)
429 return self.addrevision(text, transaction, linkrev, p1, p2)
430
430
431 def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
431 def addrevision(self, text, transaction, linkrev, p1, p2, node=None,
432 flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
432 flags=revlog.REVIDX_DEFAULT_FLAGS, cachedelta=None):
433 validatenode(p1)
433 validatenode(p1)
434 validatenode(p2)
434 validatenode(p2)
435
435
436 if flags:
436 if flags:
437 node = node or revlog.hash(text, p1, p2)
437 node = node or revlog.hash(text, p1, p2)
438
438
439 rawtext, validatehash = self._processflags(text, flags, 'write')
439 rawtext, validatehash = self._processflags(text, flags, 'write')
440
440
441 node = node or revlog.hash(text, p1, p2)
441 node = node or revlog.hash(text, p1, p2)
442
442
443 if node in self._indexbynode:
443 if node in self._indexbynode:
444 return node
444 return node
445
445
446 if validatehash:
446 if validatehash:
447 self.checkhash(rawtext, node, p1=p1, p2=p2)
447 self.checkhash(rawtext, node, p1=p1, p2=p2)
448
448
449 return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
449 return self._addrawrevision(node, rawtext, transaction, linkrev, p1, p2,
450 flags)
450 flags)
451
451
452 def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
452 def _addrawrevision(self, node, rawtext, transaction, link, p1, p2, flags):
453 transaction.addbackup(self._indexpath)
453 transaction.addbackup(self._indexpath)
454
454
455 path = b'/'.join([self._storepath, hex(node)])
455 path = b'/'.join([self._storepath, hex(node)])
456
456
457 self._svfs.write(path, rawtext)
457 self._svfs.write(path, rawtext)
458
458
459 self._indexdata.append({
459 self._indexdata.append({
460 b'node': node,
460 b'node': node,
461 b'p1': p1,
461 b'p1': p1,
462 b'p2': p2,
462 b'p2': p2,
463 b'linkrev': link,
463 b'linkrev': link,
464 b'flags': flags,
464 b'flags': flags,
465 })
465 })
466
466
467 self._reflectindexupdate()
467 self._reflectindexupdate()
468
468
469 return node
469 return node
470
470
471 def _reflectindexupdate(self):
471 def _reflectindexupdate(self):
472 self._refreshindex()
472 self._refreshindex()
473 self._svfs.write(self._indexpath, cbor.dumps(self._indexdata))
473 self._svfs.write(self._indexpath, cbor.dumps(self._indexdata))
474
474
475 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
475 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
476 nodes = []
476 nodes = []
477
477
478 transaction.addbackup(self._indexpath)
478 transaction.addbackup(self._indexpath)
479
479
480 for node, p1, p2, linknode, deltabase, delta, flags in deltas:
480 for node, p1, p2, linknode, deltabase, delta, flags in deltas:
481 linkrev = linkmapper(linknode)
481 linkrev = linkmapper(linknode)
482 flags = flags or revlog.REVIDX_DEFAULT_FLAGS
482 flags = flags or revlog.REVIDX_DEFAULT_FLAGS
483
483
484 nodes.append(node)
484 nodes.append(node)
485
485
486 if node in self._indexbynode:
486 if node in self._indexbynode:
487 continue
487 continue
488
488
489 # Need to resolve the fulltext from the delta base.
489 # Need to resolve the fulltext from the delta base.
490 if deltabase == nullid:
490 if deltabase == nullid:
491 text = mdiff.patch(b'', delta)
491 text = mdiff.patch(b'', delta)
492 else:
492 else:
493 text = mdiff.patch(self.revision(deltabase), delta)
493 text = mdiff.patch(self.revision(deltabase), delta)
494
494
495 self._addrawrevision(node, text, transaction, linkrev, p1, p2,
495 self._addrawrevision(node, text, transaction, linkrev, p1, p2,
496 flags)
496 flags)
497
497
498 if addrevisioncb:
498 if addrevisioncb:
499 addrevisioncb(self, node)
499 addrevisioncb(self, node)
500
500
501 return nodes
501 return nodes
502
502
503 def revdiff(self, rev1, rev2):
503 def revdiff(self, rev1, rev2):
504 validaterev(rev1)
504 validaterev(rev1)
505 validaterev(rev2)
505 validaterev(rev2)
506
506
507 node1 = self.node(rev1)
507 node1 = self.node(rev1)
508 node2 = self.node(rev2)
508 node2 = self.node(rev2)
509
509
510 return mdiff.textdiff(self.revision(node1, raw=True),
510 return mdiff.textdiff(self.revision(node1, raw=True),
511 self.revision(node2, raw=True))
511 self.revision(node2, raw=True))
512
512
513 def emitrevisiondeltas(self, requests):
513 def emitrevisiondeltas(self, requests):
514 for request in requests:
514 for request in requests:
515 node = request.node
515 node = request.node
516 rev = self.rev(node)
516 rev = self.rev(node)
517
517
518 if request.basenode == nullid:
518 if request.basenode == nullid:
519 baserev = nullrev
519 baserev = nullrev
520 elif request.basenode is not None:
520 elif request.basenode is not None:
521 baserev = self.rev(request.basenode)
521 baserev = self.rev(request.basenode)
522 else:
522 else:
523 # This is a test extension and we can do simple things
523 # This is a test extension and we can do simple things
524 # for choosing a delta parent.
524 # for choosing a delta parent.
525 baserev = self.deltaparent(rev)
525 baserev = self.deltaparent(rev)
526
526
527 if baserev != nullrev and not self._candelta(baserev, rev):
527 if baserev != nullrev and not self._candelta(baserev, rev):
528 baserev = nullrev
528 baserev = nullrev
529
529
530 revision = None
530 revision = None
531 delta = None
531 delta = None
532 baserevisionsize = None
532 baserevisionsize = None
533
533
534 if self.iscensored(baserev) or self.iscensored(rev):
534 if self.iscensored(baserev) or self.iscensored(rev):
535 try:
535 try:
536 revision = self.revision(node, raw=True)
536 revision = self.revision(node, raw=True)
537 except error.CensoredNodeError as e:
537 except error.CensoredNodeError as e:
538 revision = e.tombstone
538 revision = e.tombstone
539
539
540 if baserev != nullrev:
540 if baserev != nullrev:
541 baserevisionsize = self.rawsize(baserev)
541 baserevisionsize = self.rawsize(baserev)
542
542
543 elif baserev == nullrev:
543 elif baserev == nullrev:
544 revision = self.revision(node, raw=True)
544 revision = self.revision(node, raw=True)
545 else:
545 else:
546 delta = self.revdiff(baserev, rev)
546 delta = self.revdiff(baserev, rev)
547
547
548 extraflags = revlog.REVIDX_ELLIPSIS if request.ellipsis else 0
548 extraflags = revlog.REVIDX_ELLIPSIS if request.ellipsis else 0
549
549
550 yield simplestorerevisiondelta(
550 yield simplestorerevisiondelta(
551 node=node,
551 node=node,
552 p1node=request.p1node,
552 p1node=request.p1node,
553 p2node=request.p2node,
553 p2node=request.p2node,
554 linknode=request.linknode,
554 linknode=request.linknode,
555 basenode=self.node(baserev),
555 basenode=self.node(baserev),
556 flags=self.flags(rev) | extraflags,
556 flags=self.flags(rev) | extraflags,
557 baserevisionsize=baserevisionsize,
557 baserevisionsize=baserevisionsize,
558 revision=revision,
558 revision=revision,
559 delta=delta)
559 delta=delta)
560
560
561 def heads(self, start=None, stop=None):
561 def heads(self, start=None, stop=None):
562 # This is copied from revlog.py.
562 # This is copied from revlog.py.
563 if start is None and stop is None:
563 if start is None and stop is None:
564 if not len(self):
564 if not len(self):
565 return [nullid]
565 return [nullid]
566 return [self.node(r) for r in self.headrevs()]
566 return [self.node(r) for r in self.headrevs()]
567
567
568 if start is None:
568 if start is None:
569 start = nullid
569 start = nullid
570 if stop is None:
570 if stop is None:
571 stop = []
571 stop = []
572 stoprevs = set([self.rev(n) for n in stop])
572 stoprevs = set([self.rev(n) for n in stop])
573 startrev = self.rev(start)
573 startrev = self.rev(start)
574 reachable = {startrev}
574 reachable = {startrev}
575 heads = {startrev}
575 heads = {startrev}
576
576
577 parentrevs = self.parentrevs
577 parentrevs = self.parentrevs
578 for r in self.revs(start=startrev + 1):
578 for r in self.revs(start=startrev + 1):
579 for p in parentrevs(r):
579 for p in parentrevs(r):
580 if p in reachable:
580 if p in reachable:
581 if r not in stoprevs:
581 if r not in stoprevs:
582 reachable.add(r)
582 reachable.add(r)
583 heads.add(r)
583 heads.add(r)
584 if p in heads and p not in stoprevs:
584 if p in heads and p not in stoprevs:
585 heads.remove(p)
585 heads.remove(p)
586
586
587 return [self.node(r) for r in heads]
587 return [self.node(r) for r in heads]
588
588
589 def children(self, node):
589 def children(self, node):
590 validatenode(node)
590 validatenode(node)
591
591
592 # This is a copy of revlog.children().
592 # This is a copy of revlog.children().
593 c = []
593 c = []
594 p = self.rev(node)
594 p = self.rev(node)
595 for r in self.revs(start=p + 1):
595 for r in self.revs(start=p + 1):
596 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
596 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
597 if prevs:
597 if prevs:
598 for pr in prevs:
598 for pr in prevs:
599 if pr == p:
599 if pr == p:
600 c.append(self.node(r))
600 c.append(self.node(r))
601 elif p == nullrev:
601 elif p == nullrev:
602 c.append(self.node(r))
602 c.append(self.node(r))
603 return c
603 return c
604
604
605 def getstrippoint(self, minlink):
605 def getstrippoint(self, minlink):
606
606
607 # This is largely a copy of revlog.getstrippoint().
607 # This is largely a copy of revlog.getstrippoint().
608 brokenrevs = set()
608 brokenrevs = set()
609 strippoint = len(self)
609 strippoint = len(self)
610
610
611 heads = {}
611 heads = {}
612 futurelargelinkrevs = set()
612 futurelargelinkrevs = set()
613 for head in self.heads():
613 for head in self.heads():
614 headlinkrev = self.linkrev(self.rev(head))
614 headlinkrev = self.linkrev(self.rev(head))
615 heads[head] = headlinkrev
615 heads[head] = headlinkrev
616 if headlinkrev >= minlink:
616 if headlinkrev >= minlink:
617 futurelargelinkrevs.add(headlinkrev)
617 futurelargelinkrevs.add(headlinkrev)
618
618
619 # This algorithm involves walking down the rev graph, starting at the
619 # This algorithm involves walking down the rev graph, starting at the
620 # heads. Since the revs are topologically sorted according to linkrev,
620 # heads. Since the revs are topologically sorted according to linkrev,
621 # once all head linkrevs are below the minlink, we know there are
621 # once all head linkrevs are below the minlink, we know there are
622 # no more revs that could have a linkrev greater than minlink.
622 # no more revs that could have a linkrev greater than minlink.
623 # So we can stop walking.
623 # So we can stop walking.
624 while futurelargelinkrevs:
624 while futurelargelinkrevs:
625 strippoint -= 1
625 strippoint -= 1
626 linkrev = heads.pop(strippoint)
626 linkrev = heads.pop(strippoint)
627
627
628 if linkrev < minlink:
628 if linkrev < minlink:
629 brokenrevs.add(strippoint)
629 brokenrevs.add(strippoint)
630 else:
630 else:
631 futurelargelinkrevs.remove(linkrev)
631 futurelargelinkrevs.remove(linkrev)
632
632
633 for p in self.parentrevs(strippoint):
633 for p in self.parentrevs(strippoint):
634 if p != nullrev:
634 if p != nullrev:
635 plinkrev = self.linkrev(p)
635 plinkrev = self.linkrev(p)
636 heads[p] = plinkrev
636 heads[p] = plinkrev
637 if plinkrev >= minlink:
637 if plinkrev >= minlink:
638 futurelargelinkrevs.add(plinkrev)
638 futurelargelinkrevs.add(plinkrev)
639
639
640 return strippoint, brokenrevs
640 return strippoint, brokenrevs
641
641
642 def strip(self, minlink, transaction):
642 def strip(self, minlink, transaction):
643 if not len(self):
643 if not len(self):
644 return
644 return
645
645
646 rev, _ignored = self.getstrippoint(minlink)
646 rev, _ignored = self.getstrippoint(minlink)
647 if rev == len(self):
647 if rev == len(self):
648 return
648 return
649
649
650 # Purge index data starting at the requested revision.
650 # Purge index data starting at the requested revision.
651 self._indexdata[rev:] = []
651 self._indexdata[rev:] = []
652 self._reflectindexupdate()
652 self._reflectindexupdate()
653
653
654 def issimplestorefile(f, kind, st):
654 def issimplestorefile(f, kind, st):
655 if kind != stat.S_IFREG:
655 if kind != stat.S_IFREG:
656 return False
656 return False
657
657
658 if store.isrevlog(f, kind, st):
658 if store.isrevlog(f, kind, st):
659 return False
659 return False
660
660
661 # Ignore transaction undo files.
661 # Ignore transaction undo files.
662 if f.startswith('undo.'):
662 if f.startswith('undo.'):
663 return False
663 return False
664
664
665 # Otherwise assume it belongs to the simple store.
665 # Otherwise assume it belongs to the simple store.
666 return True
666 return True
667
667
668 class simplestore(store.encodedstore):
668 class simplestore(store.encodedstore):
669 def datafiles(self):
669 def datafiles(self):
670 for x in super(simplestore, self).datafiles():
670 for x in super(simplestore, self).datafiles():
671 yield x
671 yield x
672
672
673 # Supplement with non-revlog files.
673 # Supplement with non-revlog files.
674 extrafiles = self._walk('data', True, filefilter=issimplestorefile)
674 extrafiles = self._walk('data', True, filefilter=issimplestorefile)
675
675
676 for unencoded, encoded, size in extrafiles:
676 for unencoded, encoded, size in extrafiles:
677 try:
677 try:
678 unencoded = store.decodefilename(unencoded)
678 unencoded = store.decodefilename(unencoded)
679 except KeyError:
679 except KeyError:
680 unencoded = None
680 unencoded = None
681
681
682 yield unencoded, encoded, size
682 yield unencoded, encoded, size
683
683
684 def reposetup(ui, repo):
684 def reposetup(ui, repo):
685 if not repo.local():
685 if not repo.local():
686 return
686 return
687
687
688 if isinstance(repo, bundlerepo.bundlerepository):
688 if isinstance(repo, bundlerepo.bundlerepository):
689 raise error.Abort(_('cannot use simple store with bundlerepo'))
689 raise error.Abort(_('cannot use simple store with bundlerepo'))
690
690
691 class simplestorerepo(repo.__class__):
691 class simplestorerepo(repo.__class__):
692 def file(self, f):
692 def file(self, f):
693 return filestorage(self.svfs, f)
693 return filestorage(self.svfs, f)
694
694
695 repo.__class__ = simplestorerepo
695 repo.__class__ = simplestorerepo
696
696
697 def featuresetup(ui, supported):
697 def featuresetup(ui, supported):
698 supported.add(REQUIREMENT)
698 supported.add(REQUIREMENT)
699
699
700 def newreporequirements(orig, ui):
700 def newreporequirements(orig, ui):
701 """Modifies default requirements for new repos to use the simple store."""
701 """Modifies default requirements for new repos to use the simple store."""
702 requirements = orig(ui)
702 requirements = orig(ui)
703
703
704 # These requirements are only used to affect creation of the store
704 # These requirements are only used to affect creation of the store
705 # object. We have our own store. So we can remove them.
705 # object. We have our own store. So we can remove them.
706 # TODO do this once we feel like taking the test hit.
706 # TODO do this once we feel like taking the test hit.
707 #if 'fncache' in requirements:
707 #if 'fncache' in requirements:
708 # requirements.remove('fncache')
708 # requirements.remove('fncache')
709 #if 'dotencode' in requirements:
709 #if 'dotencode' in requirements:
710 # requirements.remove('dotencode')
710 # requirements.remove('dotencode')
711
711
712 requirements.add(REQUIREMENT)
712 requirements.add(REQUIREMENT)
713
713
714 return requirements
714 return requirements
715
715
716 def makestore(orig, requirements, path, vfstype):
716 def makestore(orig, requirements, path, vfstype):
717 if REQUIREMENT not in requirements:
717 if REQUIREMENT not in requirements:
718 return orig(requirements, path, vfstype)
718 return orig(requirements, path, vfstype)
719
719
720 return simplestore(path, vfstype)
720 return simplestore(path, vfstype)
721
721
722 def verifierinit(orig, self, *args, **kwargs):
722 def verifierinit(orig, self, *args, **kwargs):
723 orig(self, *args, **kwargs)
723 orig(self, *args, **kwargs)
724
724
725 # We don't care that files in the store don't align with what is
725 # We don't care that files in the store don't align with what is
726 # advertised. So suppress these warnings.
726 # advertised. So suppress these warnings.
727 self.warnorphanstorefiles = False
727 self.warnorphanstorefiles = False
728
728
729 def extsetup(ui):
729 def extsetup(ui):
730 localrepo.featuresetupfuncs.add(featuresetup)
730 localrepo.featuresetupfuncs.add(featuresetup)
731
731
732 extensions.wrapfunction(localrepo, 'newreporequirements',
732 extensions.wrapfunction(localrepo, 'newreporequirements',
733 newreporequirements)
733 newreporequirements)
734 extensions.wrapfunction(store, 'store', makestore)
734 extensions.wrapfunction(store, 'store', makestore)
735 extensions.wrapfunction(verify.verifier, '__init__', verifierinit)
735 extensions.wrapfunction(verify.verifier, '__init__', verifierinit)