##// END OF EJS Templates
storageutil: extract filelog.cmp() to a standalone function...
Gregory Szorc -
r40042:422beffd default
parent child Browse files
Show More
@@ -1,238 +1,219
1 # filelog.py - file history class for mercurial
1 # filelog.py - file history class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from . import (
10 from . import (
11 error,
11 error,
12 repository,
12 repository,
13 revlog,
13 revlog,
14 )
14 )
15 from .utils import (
15 from .utils import (
16 interfaceutil,
16 interfaceutil,
17 storageutil,
17 storageutil,
18 )
18 )
19
19
20 @interfaceutil.implementer(repository.ifilestorage)
20 @interfaceutil.implementer(repository.ifilestorage)
21 class filelog(object):
21 class filelog(object):
22 def __init__(self, opener, path):
22 def __init__(self, opener, path):
23 self._revlog = revlog.revlog(opener,
23 self._revlog = revlog.revlog(opener,
24 '/'.join(('data', path + '.i')),
24 '/'.join(('data', path + '.i')),
25 censorable=True)
25 censorable=True)
26 # Full name of the user visible file, relative to the repository root.
26 # Full name of the user visible file, relative to the repository root.
27 # Used by LFS.
27 # Used by LFS.
28 self._revlog.filename = path
28 self._revlog.filename = path
29
29
30 def __len__(self):
30 def __len__(self):
31 return len(self._revlog)
31 return len(self._revlog)
32
32
33 def __iter__(self):
33 def __iter__(self):
34 return self._revlog.__iter__()
34 return self._revlog.__iter__()
35
35
36 def revs(self, start=0, stop=None):
36 def revs(self, start=0, stop=None):
37 return self._revlog.revs(start=start, stop=stop)
37 return self._revlog.revs(start=start, stop=stop)
38
38
39 def parents(self, node):
39 def parents(self, node):
40 return self._revlog.parents(node)
40 return self._revlog.parents(node)
41
41
42 def parentrevs(self, rev):
42 def parentrevs(self, rev):
43 return self._revlog.parentrevs(rev)
43 return self._revlog.parentrevs(rev)
44
44
45 def rev(self, node):
45 def rev(self, node):
46 return self._revlog.rev(node)
46 return self._revlog.rev(node)
47
47
48 def node(self, rev):
48 def node(self, rev):
49 return self._revlog.node(rev)
49 return self._revlog.node(rev)
50
50
51 def lookup(self, node):
51 def lookup(self, node):
52 return storageutil.fileidlookup(self._revlog, node,
52 return storageutil.fileidlookup(self._revlog, node,
53 self._revlog.indexfile)
53 self._revlog.indexfile)
54
54
55 def linkrev(self, rev):
55 def linkrev(self, rev):
56 return self._revlog.linkrev(rev)
56 return self._revlog.linkrev(rev)
57
57
58 def commonancestorsheads(self, node1, node2):
58 def commonancestorsheads(self, node1, node2):
59 return self._revlog.commonancestorsheads(node1, node2)
59 return self._revlog.commonancestorsheads(node1, node2)
60
60
61 # Used by dagop.blockdescendants().
61 # Used by dagop.blockdescendants().
62 def descendants(self, revs):
62 def descendants(self, revs):
63 return self._revlog.descendants(revs)
63 return self._revlog.descendants(revs)
64
64
65 def heads(self, start=None, stop=None):
65 def heads(self, start=None, stop=None):
66 return self._revlog.heads(start, stop)
66 return self._revlog.heads(start, stop)
67
67
68 # Used by hgweb, children extension.
68 # Used by hgweb, children extension.
69 def children(self, node):
69 def children(self, node):
70 return self._revlog.children(node)
70 return self._revlog.children(node)
71
71
72 def iscensored(self, rev):
72 def iscensored(self, rev):
73 return self._revlog.iscensored(rev)
73 return self._revlog.iscensored(rev)
74
74
75 def revision(self, node, _df=None, raw=False):
75 def revision(self, node, _df=None, raw=False):
76 return self._revlog.revision(node, _df=_df, raw=raw)
76 return self._revlog.revision(node, _df=_df, raw=raw)
77
77
78 def emitrevisions(self, nodes, nodesorder=None,
78 def emitrevisions(self, nodes, nodesorder=None,
79 revisiondata=False, assumehaveparentrevisions=False,
79 revisiondata=False, assumehaveparentrevisions=False,
80 deltaprevious=False):
80 deltaprevious=False):
81 return self._revlog.emitrevisions(
81 return self._revlog.emitrevisions(
82 nodes, nodesorder=nodesorder, revisiondata=revisiondata,
82 nodes, nodesorder=nodesorder, revisiondata=revisiondata,
83 assumehaveparentrevisions=assumehaveparentrevisions,
83 assumehaveparentrevisions=assumehaveparentrevisions,
84 deltaprevious=deltaprevious)
84 deltaprevious=deltaprevious)
85
85
86 def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
86 def addrevision(self, revisiondata, transaction, linkrev, p1, p2,
87 node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
87 node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
88 cachedelta=None):
88 cachedelta=None):
89 return self._revlog.addrevision(revisiondata, transaction, linkrev,
89 return self._revlog.addrevision(revisiondata, transaction, linkrev,
90 p1, p2, node=node, flags=flags,
90 p1, p2, node=node, flags=flags,
91 cachedelta=cachedelta)
91 cachedelta=cachedelta)
92
92
93 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
93 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
94 return self._revlog.addgroup(deltas, linkmapper, transaction,
94 return self._revlog.addgroup(deltas, linkmapper, transaction,
95 addrevisioncb=addrevisioncb)
95 addrevisioncb=addrevisioncb)
96
96
97 def getstrippoint(self, minlink):
97 def getstrippoint(self, minlink):
98 return self._revlog.getstrippoint(minlink)
98 return self._revlog.getstrippoint(minlink)
99
99
100 def strip(self, minlink, transaction):
100 def strip(self, minlink, transaction):
101 return self._revlog.strip(minlink, transaction)
101 return self._revlog.strip(minlink, transaction)
102
102
103 def censorrevision(self, tr, node, tombstone=b''):
103 def censorrevision(self, tr, node, tombstone=b''):
104 return self._revlog.censorrevision(node, tombstone=tombstone)
104 return self._revlog.censorrevision(node, tombstone=tombstone)
105
105
106 def files(self):
106 def files(self):
107 return self._revlog.files()
107 return self._revlog.files()
108
108
109 def read(self, node):
109 def read(self, node):
110 return storageutil.filtermetadata(self.revision(node))
110 return storageutil.filtermetadata(self.revision(node))
111
111
112 def add(self, text, meta, transaction, link, p1=None, p2=None):
112 def add(self, text, meta, transaction, link, p1=None, p2=None):
113 if meta or text.startswith('\1\n'):
113 if meta or text.startswith('\1\n'):
114 text = storageutil.packmeta(meta, text)
114 text = storageutil.packmeta(meta, text)
115 return self.addrevision(text, transaction, link, p1, p2)
115 return self.addrevision(text, transaction, link, p1, p2)
116
116
117 def renamed(self, node):
117 def renamed(self, node):
118 return storageutil.filerevisioncopied(self, node)
118 return storageutil.filerevisioncopied(self, node)
119
119
120 def size(self, rev):
120 def size(self, rev):
121 """return the size of a given revision"""
121 """return the size of a given revision"""
122
122
123 # for revisions with renames, we have to go the slow way
123 # for revisions with renames, we have to go the slow way
124 node = self.node(rev)
124 node = self.node(rev)
125 if self.renamed(node):
125 if self.renamed(node):
126 return len(self.read(node))
126 return len(self.read(node))
127 if self.iscensored(rev):
127 if self.iscensored(rev):
128 return 0
128 return 0
129
129
130 # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
130 # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
131 return self._revlog.size(rev)
131 return self._revlog.size(rev)
132
132
133 def cmp(self, node, text):
133 def cmp(self, node, text):
134 """compare text with a given file revision
134 """compare text with a given file revision
135
135
136 returns True if text is different from what is stored.
136 returns True if text is different from what is stored.
137 """
137 """
138
138 return storageutil.filerevisiondifferent(self, node, text)
139 t = text
140 if text.startswith('\1\n'):
141 t = '\1\n\1\n' + text
142
143 samehashes = not self._revlog.cmp(node, t)
144 if samehashes:
145 return False
146
147 # censored files compare against the empty file
148 if self.iscensored(self.rev(node)):
149 return text != ''
150
151 # renaming a file produces a different hash, even if the data
152 # remains unchanged. Check if it's the case (slow):
153 if self.renamed(node):
154 t2 = self.read(node)
155 return t2 != text
156
157 return True
158
139
159 def verifyintegrity(self, state):
140 def verifyintegrity(self, state):
160 return self._revlog.verifyintegrity(state)
141 return self._revlog.verifyintegrity(state)
161
142
162 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
143 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
163 revisionscount=False, trackedsize=False,
144 revisionscount=False, trackedsize=False,
164 storedsize=False):
145 storedsize=False):
165 return self._revlog.storageinfo(
146 return self._revlog.storageinfo(
166 exclusivefiles=exclusivefiles, sharedfiles=sharedfiles,
147 exclusivefiles=exclusivefiles, sharedfiles=sharedfiles,
167 revisionscount=revisionscount, trackedsize=trackedsize,
148 revisionscount=revisionscount, trackedsize=trackedsize,
168 storedsize=storedsize)
149 storedsize=storedsize)
169
150
170 # TODO these aren't part of the interface and aren't internal methods.
151 # TODO these aren't part of the interface and aren't internal methods.
171 # Callers should be fixed to not use them.
152 # Callers should be fixed to not use them.
172
153
173 # Used by bundlefilelog, unionfilelog.
154 # Used by bundlefilelog, unionfilelog.
174 @property
155 @property
175 def indexfile(self):
156 def indexfile(self):
176 return self._revlog.indexfile
157 return self._revlog.indexfile
177
158
178 @indexfile.setter
159 @indexfile.setter
179 def indexfile(self, value):
160 def indexfile(self, value):
180 self._revlog.indexfile = value
161 self._revlog.indexfile = value
181
162
182 # Used by repo upgrade.
163 # Used by repo upgrade.
183 def clone(self, tr, destrevlog, **kwargs):
164 def clone(self, tr, destrevlog, **kwargs):
184 if not isinstance(destrevlog, filelog):
165 if not isinstance(destrevlog, filelog):
185 raise error.ProgrammingError('expected filelog to clone()')
166 raise error.ProgrammingError('expected filelog to clone()')
186
167
187 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
168 return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
188
169
189 class narrowfilelog(filelog):
170 class narrowfilelog(filelog):
190 """Filelog variation to be used with narrow stores."""
171 """Filelog variation to be used with narrow stores."""
191
172
192 def __init__(self, opener, path, narrowmatch):
173 def __init__(self, opener, path, narrowmatch):
193 super(narrowfilelog, self).__init__(opener, path)
174 super(narrowfilelog, self).__init__(opener, path)
194 self._narrowmatch = narrowmatch
175 self._narrowmatch = narrowmatch
195
176
196 def renamed(self, node):
177 def renamed(self, node):
197 res = super(narrowfilelog, self).renamed(node)
178 res = super(narrowfilelog, self).renamed(node)
198
179
199 # Renames that come from outside the narrowspec are problematic
180 # Renames that come from outside the narrowspec are problematic
200 # because we may lack the base text for the rename. This can result
181 # because we may lack the base text for the rename. This can result
201 # in code attempting to walk the ancestry or compute a diff
182 # in code attempting to walk the ancestry or compute a diff
202 # encountering a missing revision. We address this by silently
183 # encountering a missing revision. We address this by silently
203 # removing rename metadata if the source file is outside the
184 # removing rename metadata if the source file is outside the
204 # narrow spec.
185 # narrow spec.
205 #
186 #
206 # A better solution would be to see if the base revision is available,
187 # A better solution would be to see if the base revision is available,
207 # rather than assuming it isn't.
188 # rather than assuming it isn't.
208 #
189 #
209 # An even better solution would be to teach all consumers of rename
190 # An even better solution would be to teach all consumers of rename
210 # metadata that the base revision may not be available.
191 # metadata that the base revision may not be available.
211 #
192 #
212 # TODO consider better ways of doing this.
193 # TODO consider better ways of doing this.
213 if res and not self._narrowmatch(res[0]):
194 if res and not self._narrowmatch(res[0]):
214 return None
195 return None
215
196
216 return res
197 return res
217
198
218 def size(self, rev):
199 def size(self, rev):
219 # Because we have a custom renamed() that may lie, we need to call
200 # Because we have a custom renamed() that may lie, we need to call
220 # the base renamed() to report accurate results.
201 # the base renamed() to report accurate results.
221 node = self.node(rev)
202 node = self.node(rev)
222 if super(narrowfilelog, self).renamed(node):
203 if super(narrowfilelog, self).renamed(node):
223 return len(self.read(node))
204 return len(self.read(node))
224 else:
205 else:
225 return super(narrowfilelog, self).size(rev)
206 return super(narrowfilelog, self).size(rev)
226
207
227 def cmp(self, node, text):
208 def cmp(self, node, text):
228 different = super(narrowfilelog, self).cmp(node, text)
209 different = super(narrowfilelog, self).cmp(node, text)
229
210
230 # Because renamed() may lie, we may get false positives for
211 # Because renamed() may lie, we may get false positives for
231 # different content. Check for this by comparing against the original
212 # different content. Check for this by comparing against the original
232 # renamed() implementation.
213 # renamed() implementation.
233 if different:
214 if different:
234 if super(narrowfilelog, self).renamed(node):
215 if super(narrowfilelog, self).renamed(node):
235 t2 = self.read(node)
216 t2 = self.read(node)
236 return t2 != text
217 return t2 != text
237
218
238 return different
219 return different
@@ -1,228 +1,254
1 # storageutil.py - Storage functionality agnostic of backend implementation.
1 # storageutil.py - Storage functionality agnostic of backend implementation.
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import hashlib
10 import hashlib
11 import re
11 import re
12
12
13 from ..i18n import _
13 from ..i18n import _
14 from ..node import (
14 from ..node import (
15 bin,
15 bin,
16 nullid,
16 nullid,
17 nullrev,
17 nullrev,
18 )
18 )
19 from .. import (
19 from .. import (
20 error,
20 error,
21 pycompat,
21 pycompat,
22 )
22 )
23
23
24 _nullhash = hashlib.sha1(nullid)
24 _nullhash = hashlib.sha1(nullid)
25
25
26 def hashrevisionsha1(text, p1, p2):
26 def hashrevisionsha1(text, p1, p2):
27 """Compute the SHA-1 for revision data and its parents.
27 """Compute the SHA-1 for revision data and its parents.
28
28
29 This hash combines both the current file contents and its history
29 This hash combines both the current file contents and its history
30 in a manner that makes it easy to distinguish nodes with the same
30 in a manner that makes it easy to distinguish nodes with the same
31 content in the revision graph.
31 content in the revision graph.
32 """
32 """
33 # As of now, if one of the parent node is null, p2 is null
33 # As of now, if one of the parent node is null, p2 is null
34 if p2 == nullid:
34 if p2 == nullid:
35 # deep copy of a hash is faster than creating one
35 # deep copy of a hash is faster than creating one
36 s = _nullhash.copy()
36 s = _nullhash.copy()
37 s.update(p1)
37 s.update(p1)
38 else:
38 else:
39 # none of the parent nodes are nullid
39 # none of the parent nodes are nullid
40 if p1 < p2:
40 if p1 < p2:
41 a = p1
41 a = p1
42 b = p2
42 b = p2
43 else:
43 else:
44 a = p2
44 a = p2
45 b = p1
45 b = p1
46 s = hashlib.sha1(a)
46 s = hashlib.sha1(a)
47 s.update(b)
47 s.update(b)
48 s.update(text)
48 s.update(text)
49 return s.digest()
49 return s.digest()
50
50
51 METADATA_RE = re.compile(b'\x01\n')
51 METADATA_RE = re.compile(b'\x01\n')
52
52
53 def parsemeta(text):
53 def parsemeta(text):
54 """Parse metadata header from revision data.
54 """Parse metadata header from revision data.
55
55
56 Returns a 2-tuple of (metadata, offset), where both can be None if there
56 Returns a 2-tuple of (metadata, offset), where both can be None if there
57 is no metadata.
57 is no metadata.
58 """
58 """
59 # text can be buffer, so we can't use .startswith or .index
59 # text can be buffer, so we can't use .startswith or .index
60 if text[:2] != b'\x01\n':
60 if text[:2] != b'\x01\n':
61 return None, None
61 return None, None
62 s = METADATA_RE.search(text, 2).start()
62 s = METADATA_RE.search(text, 2).start()
63 mtext = text[2:s]
63 mtext = text[2:s]
64 meta = {}
64 meta = {}
65 for l in mtext.splitlines():
65 for l in mtext.splitlines():
66 k, v = l.split(b': ', 1)
66 k, v = l.split(b': ', 1)
67 meta[k] = v
67 meta[k] = v
68 return meta, s + 2
68 return meta, s + 2
69
69
70 def packmeta(meta, text):
70 def packmeta(meta, text):
71 """Add metadata to fulltext to produce revision text."""
71 """Add metadata to fulltext to produce revision text."""
72 keys = sorted(meta)
72 keys = sorted(meta)
73 metatext = b''.join(b'%s: %s\n' % (k, meta[k]) for k in keys)
73 metatext = b''.join(b'%s: %s\n' % (k, meta[k]) for k in keys)
74 return b'\x01\n%s\x01\n%s' % (metatext, text)
74 return b'\x01\n%s\x01\n%s' % (metatext, text)
75
75
76 def iscensoredtext(text):
76 def iscensoredtext(text):
77 meta = parsemeta(text)[0]
77 meta = parsemeta(text)[0]
78 return meta and b'censored' in meta
78 return meta and b'censored' in meta
79
79
80 def filtermetadata(text):
80 def filtermetadata(text):
81 """Extract just the revision data from source text.
81 """Extract just the revision data from source text.
82
82
83 Returns ``text`` unless it has a metadata header, in which case we return
83 Returns ``text`` unless it has a metadata header, in which case we return
84 a new buffer without the metadata.
84 a new buffer without the metadata.
85 """
85 """
86 if not text.startswith(b'\x01\n'):
86 if not text.startswith(b'\x01\n'):
87 return text
87 return text
88
88
89 offset = text.index(b'\x01\n', 2)
89 offset = text.index(b'\x01\n', 2)
90 return text[offset + 2:]
90 return text[offset + 2:]
91
91
92 def filerevisioncopied(store, node):
92 def filerevisioncopied(store, node):
93 """Resolve file revision copy metadata.
93 """Resolve file revision copy metadata.
94
94
95 Returns ``False`` if the file has no copy metadata. Otherwise a
95 Returns ``False`` if the file has no copy metadata. Otherwise a
96 2-tuple of the source filename and node.
96 2-tuple of the source filename and node.
97 """
97 """
98 if store.parents(node)[0] != nullid:
98 if store.parents(node)[0] != nullid:
99 return False
99 return False
100
100
101 meta = parsemeta(store.revision(node))[0]
101 meta = parsemeta(store.revision(node))[0]
102
102
103 # copy and copyrev occur in pairs. In rare cases due to old bugs,
103 # copy and copyrev occur in pairs. In rare cases due to old bugs,
104 # one can occur without the other. So ensure both are present to flag
104 # one can occur without the other. So ensure both are present to flag
105 # as a copy.
105 # as a copy.
106 if meta and b'copy' in meta and b'copyrev' in meta:
106 if meta and b'copy' in meta and b'copyrev' in meta:
107 return meta[b'copy'], bin(meta[b'copyrev'])
107 return meta[b'copy'], bin(meta[b'copyrev'])
108
108
109 return False
109 return False
110
110
111 def filerevisiondifferent(store, node, filedata):
112 """Determines whether file data is equivalent to a stored node."""
113
114 if filedata.startswith(b'\x01\n'):
115 revisiontext = b'\x01\n\x01\n' + filedata
116 else:
117 revisiontext = filedata
118
119 p1, p2 = store.parents(node)
120
121 computednode = hashrevisionsha1(revisiontext, p1, p2)
122
123 if computednode == node:
124 return False
125
126 # Censored files compare against the empty file.
127 if store.iscensored(store.rev(node)):
128 return filedata != b''
129
130 # Renaming a file produces a different hash, even if the data
131 # remains unchanged. Check if that's the case.
132 if store.renamed(node):
133 return store.read(node) != filedata
134
135 return True
136
111 def iterrevs(storelen, start=0, stop=None):
137 def iterrevs(storelen, start=0, stop=None):
112 """Iterate over revision numbers in a store."""
138 """Iterate over revision numbers in a store."""
113 step = 1
139 step = 1
114
140
115 if stop is not None:
141 if stop is not None:
116 if start > stop:
142 if start > stop:
117 step = -1
143 step = -1
118 stop += step
144 stop += step
119 if stop > storelen:
145 if stop > storelen:
120 stop = storelen
146 stop = storelen
121 else:
147 else:
122 stop = storelen
148 stop = storelen
123
149
124 return pycompat.xrange(start, stop, step)
150 return pycompat.xrange(start, stop, step)
125
151
126 def fileidlookup(store, fileid, identifier):
152 def fileidlookup(store, fileid, identifier):
127 """Resolve the file node for a value.
153 """Resolve the file node for a value.
128
154
129 ``store`` is an object implementing the ``ifileindex`` interface.
155 ``store`` is an object implementing the ``ifileindex`` interface.
130
156
131 ``fileid`` can be:
157 ``fileid`` can be:
132
158
133 * A 20 byte binary node.
159 * A 20 byte binary node.
134 * An integer revision number
160 * An integer revision number
135 * A 40 byte hex node.
161 * A 40 byte hex node.
136 * A bytes that can be parsed as an integer representing a revision number.
162 * A bytes that can be parsed as an integer representing a revision number.
137
163
138 ``identifier`` is used to populate ``error.LookupError`` with an identifier
164 ``identifier`` is used to populate ``error.LookupError`` with an identifier
139 for the store.
165 for the store.
140
166
141 Raises ``error.LookupError`` on failure.
167 Raises ``error.LookupError`` on failure.
142 """
168 """
143 if isinstance(fileid, int):
169 if isinstance(fileid, int):
144 try:
170 try:
145 return store.node(fileid)
171 return store.node(fileid)
146 except IndexError:
172 except IndexError:
147 raise error.LookupError(fileid, identifier, _('no match found'))
173 raise error.LookupError(fileid, identifier, _('no match found'))
148
174
149 if len(fileid) == 20:
175 if len(fileid) == 20:
150 try:
176 try:
151 store.rev(fileid)
177 store.rev(fileid)
152 return fileid
178 return fileid
153 except error.LookupError:
179 except error.LookupError:
154 pass
180 pass
155
181
156 if len(fileid) == 40:
182 if len(fileid) == 40:
157 try:
183 try:
158 rawnode = bin(fileid)
184 rawnode = bin(fileid)
159 store.rev(rawnode)
185 store.rev(rawnode)
160 return rawnode
186 return rawnode
161 except TypeError:
187 except TypeError:
162 pass
188 pass
163
189
164 try:
190 try:
165 rev = int(fileid)
191 rev = int(fileid)
166
192
167 if b'%d' % rev != fileid:
193 if b'%d' % rev != fileid:
168 raise ValueError
194 raise ValueError
169
195
170 try:
196 try:
171 return store.node(rev)
197 return store.node(rev)
172 except (IndexError, TypeError):
198 except (IndexError, TypeError):
173 pass
199 pass
174 except (ValueError, OverflowError):
200 except (ValueError, OverflowError):
175 pass
201 pass
176
202
177 raise error.LookupError(fileid, identifier, _('no match found'))
203 raise error.LookupError(fileid, identifier, _('no match found'))
178
204
179 def resolvestripinfo(minlinkrev, tiprev, headrevs, linkrevfn, parentrevsfn):
205 def resolvestripinfo(minlinkrev, tiprev, headrevs, linkrevfn, parentrevsfn):
180 """Resolve information needed to strip revisions.
206 """Resolve information needed to strip revisions.
181
207
182 Finds the minimum revision number that must be stripped in order to
208 Finds the minimum revision number that must be stripped in order to
183 strip ``minlinkrev``.
209 strip ``minlinkrev``.
184
210
185 Returns a 2-tuple of the minimum revision number to do that and a set
211 Returns a 2-tuple of the minimum revision number to do that and a set
186 of all revision numbers that have linkrevs that would be broken
212 of all revision numbers that have linkrevs that would be broken
187 by that strip.
213 by that strip.
188
214
189 ``tiprev`` is the current tip-most revision. It is ``len(store) - 1``.
215 ``tiprev`` is the current tip-most revision. It is ``len(store) - 1``.
190 ``headrevs`` is an iterable of head revisions.
216 ``headrevs`` is an iterable of head revisions.
191 ``linkrevfn`` is a callable that receives a revision and returns a linked
217 ``linkrevfn`` is a callable that receives a revision and returns a linked
192 revision.
218 revision.
193 ``parentrevsfn`` is a callable that receives a revision number and returns
219 ``parentrevsfn`` is a callable that receives a revision number and returns
194 an iterable of its parent revision numbers.
220 an iterable of its parent revision numbers.
195 """
221 """
196 brokenrevs = set()
222 brokenrevs = set()
197 strippoint = tiprev + 1
223 strippoint = tiprev + 1
198
224
199 heads = {}
225 heads = {}
200 futurelargelinkrevs = set()
226 futurelargelinkrevs = set()
201 for head in headrevs:
227 for head in headrevs:
202 headlinkrev = linkrevfn(head)
228 headlinkrev = linkrevfn(head)
203 heads[head] = headlinkrev
229 heads[head] = headlinkrev
204 if headlinkrev >= minlinkrev:
230 if headlinkrev >= minlinkrev:
205 futurelargelinkrevs.add(headlinkrev)
231 futurelargelinkrevs.add(headlinkrev)
206
232
207 # This algorithm involves walking down the rev graph, starting at the
233 # This algorithm involves walking down the rev graph, starting at the
208 # heads. Since the revs are topologically sorted according to linkrev,
234 # heads. Since the revs are topologically sorted according to linkrev,
209 # once all head linkrevs are below the minlink, we know there are
235 # once all head linkrevs are below the minlink, we know there are
210 # no more revs that could have a linkrev greater than minlink.
236 # no more revs that could have a linkrev greater than minlink.
211 # So we can stop walking.
237 # So we can stop walking.
212 while futurelargelinkrevs:
238 while futurelargelinkrevs:
213 strippoint -= 1
239 strippoint -= 1
214 linkrev = heads.pop(strippoint)
240 linkrev = heads.pop(strippoint)
215
241
216 if linkrev < minlinkrev:
242 if linkrev < minlinkrev:
217 brokenrevs.add(strippoint)
243 brokenrevs.add(strippoint)
218 else:
244 else:
219 futurelargelinkrevs.remove(linkrev)
245 futurelargelinkrevs.remove(linkrev)
220
246
221 for p in parentrevsfn(strippoint):
247 for p in parentrevsfn(strippoint):
222 if p != nullrev:
248 if p != nullrev:
223 plinkrev = linkrevfn(p)
249 plinkrev = linkrevfn(p)
224 heads[p] = plinkrev
250 heads[p] = plinkrev
225 if plinkrev >= minlinkrev:
251 if plinkrev >= minlinkrev:
226 futurelargelinkrevs.add(plinkrev)
252 futurelargelinkrevs.add(plinkrev)
227
253
228 return strippoint, brokenrevs
254 return strippoint, brokenrevs
General Comments 0
You need to be logged in to leave comments. Login now