##// END OF EJS Templates
storageutil: convert fileid to bytes to avoid cast to %s...
Gregory Szorc -
r40357:6994a8be default
parent child Browse files
Show More
@@ -1,450 +1,451 b''
1 # storageutil.py - Storage functionality agnostic of backend implementation.
1 # storageutil.py - Storage functionality agnostic of backend implementation.
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import hashlib
10 import hashlib
11 import re
11 import re
12
12
13 from ..i18n import _
13 from ..i18n import _
14 from ..node import (
14 from ..node import (
15 bin,
15 bin,
16 nullid,
16 nullid,
17 nullrev,
17 nullrev,
18 )
18 )
19 from .. import (
19 from .. import (
20 dagop,
20 dagop,
21 error,
21 error,
22 mdiff,
22 mdiff,
23 pycompat,
23 pycompat,
24 )
24 )
25
25
# Hash object seeded with the null node; copying a hash is cheaper than
# constructing a fresh one for the common single-parent case below.
_nullhash = hashlib.sha1(nullid)

def hashrevisionsha1(text, p1, p2):
    """Compute the SHA-1 for revision data and its parents.

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # As of now, if one of the parent node is null, p2 is null.
    if p2 == nullid:
        # Deep copy of a precomputed hash is faster than creating one.
        hasher = _nullhash.copy()
        hasher.update(p1)
    else:
        # Neither parent is null; feed them in canonical (sorted) order so
        # the result is independent of parent ordering.
        smaller, larger = (p1, p2) if p1 < p2 else (p2, p1)
        hasher = hashlib.sha1(smaller)
        hasher.update(larger)
    hasher.update(text)
    return hasher.digest()
52
52
METADATA_RE = re.compile(b'\x01\n')

def parsemeta(text):
    """Parse metadata header from revision data.

    Returns a 2-tuple of (metadata, offset), where both can be None if there
    is no metadata.
    """
    # text can be buffer, so we can't use .startswith or .index
    if text[:2] != b'\x01\n':
        return (None, None)
    # The header is terminated by the next \x01\n pair.
    end = METADATA_RE.search(text, 2).start()
    lines = text[2:end].splitlines()
    meta = dict(line.split(b': ', 1) for line in lines)
    return meta, end + 2
71
71
def packmeta(meta, text):
    """Add metadata to fulltext to produce revision text."""
    # Keys are emitted sorted so output is deterministic.
    lines = [b'%s: %s\n' % (key, meta[key]) for key in sorted(meta)]
    return b'\x01\n%s\x01\n%s' % (b''.join(lines), text)
77
77
def iscensoredtext(text):
    """Report whether revision text carries censorship metadata."""
    meta, _offset = parsemeta(text)
    return meta and b'censored' in meta
81
81
def filtermetadata(text):
    """Extract just the revision data from source text.

    Returns ``text`` unless it has a metadata header, in which case we return
    a new buffer without the metadata.
    """
    if not text.startswith(b'\x01\n'):
        return text

    # The metadata header is terminated by the next \x01\n pair; everything
    # after it is the actual revision data.
    offset = text.index(b'\x01\n', 2)
    return text[offset + 2:]
93
93
def filerevisioncopied(store, node):
    """Resolve file revision copy metadata.

    Returns ``False`` if the file has no copy metadata. Otherwise a
    2-tuple of the source filename and node.
    """
    # Only revisions whose first parent is null can carry copy metadata.
    if store.parents(node)[0] != nullid:
        return False

    meta = parsemeta(store.revision(node))[0]

    # copy and copyrev occur in pairs. In rare cases due to old bugs,
    # one can occur without the other. So ensure both are present to flag
    # as a copy.
    if not meta:
        return False
    if b'copy' not in meta or b'copyrev' not in meta:
        return False

    return meta[b'copy'], bin(meta[b'copyrev'])
112
112
def filedataequivalent(store, node, filedata):
    """Determines whether file data is equivalent to a stored node.

    Returns True if the passed file data would hash to the same value
    as a stored revision and False otherwise.

    When a stored revision is censored, filedata must be empty to have
    equivalence.

    When a stored revision has copy metadata, it is ignored as part
    of the compare.
    """
    # Data starting with the metadata marker must be escaped with an empty
    # metadata header before hashing, mirroring what storage does.
    if filedata.startswith(b'\x01\n'):
        candidate = b'\x01\n\x01\n' + filedata
    else:
        candidate = filedata

    p1, p2 = store.parents(node)

    if hashrevisionsha1(candidate, p1, p2) == node:
        return True

    # Censored files compare against the empty file.
    if store.iscensored(store.rev(node)):
        return filedata == b''

    # Renaming a file produces a different hash, even if the data
    # remains unchanged. Check if that's the case.
    if store.renamed(node):
        return store.read(node) == filedata

    return False
148
148
def iterrevs(storelen, start=0, stop=None):
    """Iterate over revision numbers in a store."""
    if stop is None:
        return pycompat.xrange(start, storelen, 1)

    # Walk backwards when the caller asked for a descending range.
    step = -1 if start > stop else 1
    # Make ``stop`` inclusive, then clamp it to the store's length.
    stop = min(stop + step, storelen)
    return pycompat.xrange(start, stop, step)
163
163
def fileidlookup(store, fileid, identifier):
    """Resolve the file node for a value.

    ``store`` is an object implementing the ``ifileindex`` interface.

    ``fileid`` can be:

    * A 20 byte binary node.
    * An integer revision number
    * A 40 byte hex node.
    * A bytes that can be parsed as an integer representing a revision number.

    ``identifier`` is used to populate ``error.LookupError`` with an identifier
    for the store.

    Raises ``error.LookupError`` on failure.
    """
    if isinstance(fileid, int):
        try:
            return store.node(fileid)
        except IndexError:
            # Format the integer as bytes (b'%d'), not str, so the
            # LookupError argument is bytes in every code path, matching
            # the b'%d' % rev usage and the final raise below.
            raise error.LookupError(b'%d' % fileid, identifier,
                                    _('no match found'))

    if len(fileid) == 20:
        try:
            store.rev(fileid)
            return fileid
        except error.LookupError:
            pass

    if len(fileid) == 40:
        try:
            rawnode = bin(fileid)
            store.rev(rawnode)
            return rawnode
        except TypeError:
            # bin() raises TypeError when fileid isn't valid hex.
            pass

    try:
        rev = int(fileid)

        # Reject values such as b'042' or b' 42' that int() accepts but
        # that don't round-trip; they must be treated as node fragments,
        # not revision numbers.
        if b'%d' % rev != fileid:
            raise ValueError

        try:
            return store.node(rev)
        except (IndexError, TypeError):
            pass
    except (ValueError, OverflowError):
        pass

    raise error.LookupError(fileid, identifier, _('no match found'))
216
217
def resolvestripinfo(minlinkrev, tiprev, headrevs, linkrevfn, parentrevsfn):
    """Resolve information needed to strip revisions.

    Finds the minimum revision number that must be stripped in order to
    strip ``minlinkrev``.

    Returns a 2-tuple of the minimum revision number to do that and a set
    of all revision numbers that have linkrevs that would be broken
    by that strip.

    ``tiprev`` is the current tip-most revision. It is ``len(store) - 1``.
    ``headrevs`` is an iterable of head revisions.
    ``linkrevfn`` is a callable that receives a revision and returns a linked
    revision.
    ``parentrevsfn`` is a callable that receives a revision number and returns
    an iterable of its parent revision numbers.
    """
    broken = set()
    strippoint = tiprev + 1

    # Pending revisions mapped to their linkrevs, seeded with the heads.
    pending = {head: linkrevfn(head) for head in headrevs}
    futurelargelinkrevs = {lr for lr in pending.values() if lr >= minlinkrev}

    # Walk down the rev graph starting at the heads. Since revs are
    # topologically sorted according to linkrev, once every outstanding
    # head linkrev drops below minlinkrev no remaining rev can have a
    # linkrev >= minlinkrev, so the walk can stop.
    while futurelargelinkrevs:
        strippoint -= 1
        linkrev = pending.pop(strippoint)

        if linkrev < minlinkrev:
            broken.add(strippoint)
        else:
            futurelargelinkrevs.remove(linkrev)

        for parent in parentrevsfn(strippoint):
            if parent == nullrev:
                continue
            plinkrev = linkrevfn(parent)
            pending[parent] = plinkrev
            if plinkrev >= minlinkrev:
                futurelargelinkrevs.add(plinkrev)

    return strippoint, broken
267
268
def emitrevisions(store, nodes, nodesorder, resultcls, deltaparentfn=None,
                  candeltafn=None, rawsizefn=None, revdifffn=None, flagsfn=None,
                  sendfulltext=False,
                  revisiondata=False, assumehaveparentrevisions=False,
                  deltaprevious=False):
    """Generic implementation of ifiledata.emitrevisions().

    Emitting revision data is subtly complex. This function attempts to
    encapsulate all the logic for doing so in a backend-agnostic way.

    ``store``
       Object conforming to ``ifilestorage`` interface.

    ``nodes``
       List of revision nodes whose data to emit.

    ``resultcls``
       A type implementing the ``irevisiondelta`` interface that will be
       constructed and returned.

    ``deltaparentfn`` (optional)
       Callable receiving a revision number and returning the revision number
       of a revision that the internal delta is stored against. This delta
       will be preferred over computing a new arbitrary delta.

       If not defined, a delta will always be computed from raw revision
       data.

    ``candeltafn`` (optional)
       Callable receiving a pair of revision numbers that returns a bool
       indicating whether a delta between them can be produced.

       If not defined, it is assumed that any two revisions can delta with
       each other.

    ``rawsizefn`` (optional)
       Callable receiving a revision number and returning the length of the
       ``store.revision(rev, raw=True)``.

       If not defined, ``len(store.revision(rev, raw=True))`` will be called.

    ``revdifffn`` (optional)
       Callable receiving a pair of revision numbers that returns a delta
       between them.

       If not defined, a delta will be computed by invoking mdiff code
       on ``store.revision()`` results.

       Defining this function allows a precomputed or stored delta to be
       used without having to compute on.

    ``flagsfn`` (optional)
       Callable receiving a revision number and returns the integer flags
       value for it. If not defined, flags value will be 0.

    ``sendfulltext``
       Whether to send fulltext revisions instead of deltas, if allowed.

    ``nodesorder``
    ``revisiondata``
    ``assumehaveparentrevisions``
    ``deltaprevious``
       See ``ifiledata.emitrevisions()`` interface documentation.
    """

    # Bind store lookups to locals; both are called repeatedly in the
    # per-revision loop below.
    fnode = store.node
    frev = store.rev

    # 'nodes' preserves the caller-supplied order, 'storage' sorts by
    # revision number, and anything else is linearized topologically.
    if nodesorder == 'nodes':
        revs = [frev(n) for n in nodes]
    elif nodesorder == 'storage':
        revs = sorted(frev(n) for n in nodes)
    else:
        revs = set(frev(n) for n in nodes)
        revs = dagop.linearize(revs, store.parentrevs)

    prevrev = None

    # Seed the delta chain with the first revision's first parent, which
    # the receiver is assumed (or required) to already have.
    if deltaprevious or assumehaveparentrevisions:
        prevrev = store.parentrevs(revs[0])[0]

    # Set of revs available to delta against.
    available = set()

    for rev in revs:
        if rev == nullrev:
            continue

        node = fnode(rev)
        p1rev, p2rev = store.parentrevs(rev)

        if deltaparentfn:
            deltaparentrev = deltaparentfn(rev)
        else:
            deltaparentrev = nullrev

        # The cascade below picks a delta base (baserev); nullrev means
        # "emit a fulltext instead of a delta."

        # Forced delta against previous mode.
        if deltaprevious:
            baserev = prevrev

        # We're instructed to send fulltext. Honor that.
        elif sendfulltext:
            baserev = nullrev

        # There is a delta in storage. We try to use that because it
        # amounts to effectively copying data from storage and is
        # therefore the fastest.
        elif deltaparentrev != nullrev:
            # Base revision was already emitted in this group. We can
            # always safely use the delta.
            if deltaparentrev in available:
                baserev = deltaparentrev

            # Base revision is a parent that hasn't been emitted already.
            # Use it if we can assume the receiver has the parent revision.
            elif (assumehaveparentrevisions
                  and deltaparentrev in (p1rev, p2rev)):
                baserev = deltaparentrev

            # No guarantee the receiver has the delta parent. Send delta
            # against last revision (if possible), which in the common case
            # should be similar enough to this revision that the delta is
            # reasonable.
            elif prevrev is not None:
                baserev = prevrev
            else:
                baserev = nullrev

        # Storage has a fulltext revision.

        # Let's use the previous revision, which is as good a guess as any.
        # There is definitely room to improve this logic.
        elif prevrev is not None:
            baserev = prevrev
        else:
            baserev = nullrev

        # But we can't actually use our chosen delta base for whatever
        # reason. Reset to fulltext.
        if baserev != nullrev and (candeltafn and not candeltafn(baserev, rev)):
            baserev = nullrev

        revision = None
        delta = None
        baserevisionsize = None

        if revisiondata:
            # Censored revisions are always sent as fulltext (the tombstone
            # text if the store refuses to hand over the raw data).
            if store.iscensored(baserev) or store.iscensored(rev):
                try:
                    revision = store.revision(node, raw=True)
                except error.CensoredNodeError as e:
                    revision = e.tombstone

                if baserev != nullrev:
                    if rawsizefn:
                        baserevisionsize = rawsizefn(baserev)
                    else:
                        baserevisionsize = len(store.revision(baserev,
                                                              raw=True))

            elif baserev == nullrev and not deltaprevious:
                revision = store.revision(node, raw=True)
                available.add(rev)
            else:
                if revdifffn:
                    delta = revdifffn(baserev, rev)
                else:
                    delta = mdiff.textdiff(store.revision(baserev, raw=True),
                                           store.revision(rev, raw=True))

                available.add(rev)

        yield resultcls(
            node=node,
            p1node=fnode(p1rev),
            p2node=fnode(p2rev),
            basenode=fnode(baserev),
            flags=flagsfn(rev) if flagsfn else 0,
            baserevisionsize=baserevisionsize,
            revision=revision,
            delta=delta)

        prevrev = rev
General Comments 0
You need to be logged in to leave comments. Login now