##// END OF EJS Templates
storageutil: make all callables optional...
Gregory Szorc -
r40045:631c6f50 default
parent child Browse files
Show More
@@ -1,410 +1,439 b''
1 # storageutil.py - Storage functionality agnostic of backend implementation.
1 # storageutil.py - Storage functionality agnostic of backend implementation.
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import hashlib
10 import hashlib
11 import re
11 import re
12
12
13 from ..i18n import _
13 from ..i18n import _
14 from ..node import (
14 from ..node import (
15 bin,
15 bin,
16 nullid,
16 nullid,
17 nullrev,
17 nullrev,
18 )
18 )
19 from .. import (
19 from .. import (
20 error,
20 error,
21 mdiff,
21 pycompat,
22 pycompat,
22 )
23 )
23
24
_nullhash = hashlib.sha1(nullid)

def hashrevisionsha1(text, p1, p2):
    """Compute the SHA-1 digest identifying a revision.

    The digest covers the revision fulltext ``text`` plus its parent
    nodes ``p1`` and ``p2``, so two revisions with identical content
    but different ancestry hash differently.
    """
    # As of now, if one of the parent nodes is null, p2 is the null one.
    if p2 == nullid:
        # Copying the precomputed null hash is cheaper than creating a
        # fresh hasher.
        hasher = _nullhash.copy()
        hasher.update(p1)
    else:
        # Both parents are real; feed them smallest-first so the result
        # is independent of argument order.
        hasher = hashlib.sha1(min(p1, p2))
        hasher.update(max(p1, p2))
    hasher.update(text)
    return hasher.digest()
50
51
METADATA_RE = re.compile(b'\x01\n')

def parsemeta(text):
    """Parse the metadata header from revision data.

    Returns a 2-tuple of (metadata dict, offset of revision data).
    Both elements are None when ``text`` carries no metadata header.
    """
    # ``text`` can be a buffer, which lacks .startswith()/.index(), so
    # compare a slice instead.
    if text[:2] != b'\x01\n':
        return None, None
    end = METADATA_RE.search(text, 2).start()
    meta = dict(line.split(b': ', 1)
                for line in text[2:end].splitlines())
    return meta, end + 2
69
70
def packmeta(meta, text):
    """Prepend a metadata header to ``text``, producing revision text.

    Keys are emitted in sorted order so output is deterministic.
    """
    header = b''.join(
        b'%s: %s\n' % (key, meta[key]) for key in sorted(meta))
    return b'\x01\n' + header + b'\x01\n' + text
75
76
def iscensoredtext(text):
    """Report whether revision text is censored.

    A revision is censored when its metadata header contains a
    ``censored`` key.  Returns a bool.
    """
    meta = parsemeta(text)[0]
    # bool() normalizes the previous behavior of returning None (or an
    # empty dict) when no metadata header exists; callers only use the
    # result in boolean context, so this is backward compatible.
    return bool(meta and b'censored' in meta)
79
80
def filtermetadata(text):
    """Strip the metadata header from revision text, if present.

    Returns ``text`` unchanged when there is no header; otherwise a new
    buffer containing only the revision data after the header.
    """
    if not text.startswith(b'\x01\n'):
        return text

    # Skip past the closing \x01\n marker of the metadata section.
    end = text.index(b'\x01\n', 2)
    return text[end + 2:]
91
92
def filerevisioncopied(store, node):
    """Resolve copy metadata for a file revision.

    Returns ``False`` when the revision carries no copy metadata;
    otherwise a 2-tuple of (source filename, binary source node).
    """
    # Copy metadata is only recorded on revisions whose first parent is
    # null; anything else cannot be a copy.
    if store.parents(node)[0] == nullid:
        meta = parsemeta(store.revision(node))[0]

        # copy and copyrev normally occur in pairs, but old bugs could
        # record one without the other.  Require both before flagging a
        # copy.
        if meta and b'copy' in meta and b'copyrev' in meta:
            return meta[b'copy'], bin(meta[b'copyrev'])

    return False
110
111
def filedataequivalent(store, node, filedata):
    """Determine whether file data matches a stored revision.

    Returns True if ``filedata`` would hash to ``node`` given the
    stored revision's parents, and False otherwise.

    A censored stored revision is only equivalent to empty file data.
    Copy metadata on the stored revision is ignored for the comparison.
    """
    # Data beginning with the metadata marker must be escaped with an
    # empty metadata header before hashing.
    if filedata.startswith(b'\x01\n'):
        revisiontext = b'\x01\n\x01\n' + filedata
    else:
        revisiontext = filedata

    p1, p2 = store.parents(node)
    if hashrevisionsha1(revisiontext, p1, p2) == node:
        return True

    # Censored files compare against the empty file.
    if store.iscensored(store.rev(node)):
        return filedata == b''

    # A rename changes the hash even when the data is unchanged; fall
    # back to comparing the resolved content.
    if store.renamed(node):
        return store.read(node) == filedata

    return False
146
147
def iterrevs(storelen, start=0, stop=None):
    """Iterate over revision numbers in a store of ``storelen`` revisions.

    Without ``stop``, iterates forward from ``start`` to the end of the
    store.  With ``stop``, iterates from ``start`` through ``stop``
    inclusively — backwards when ``start > stop`` — with the endpoint
    clamped to the store length.
    """
    if stop is None:
        return pycompat.xrange(start, storelen, 1)

    step = -1 if start > stop else 1
    # Shift the endpoint by one step so ``stop`` itself is yielded,
    # then clamp to the store boundary.
    return pycompat.xrange(start, min(stop + step, storelen), step)
161
162
def fileidlookup(store, fileid, identifier):
    """Resolve the file node for a value.

    ``store`` is an object implementing the ``ifileindex`` interface.

    ``fileid`` can be:

    * A 20 byte binary node.
    * An integer revision number.
    * A 40 byte hex node.
    * A bytes that can be parsed as an integer representing a revision
      number.

    ``identifier`` is used to populate ``error.LookupError`` with an
    identifier for the store.

    Raises ``error.LookupError`` on failure.
    """
    # Integer inputs are revision numbers.
    if isinstance(fileid, int):
        try:
            return store.node(fileid)
        except IndexError:
            raise error.LookupError(fileid, identifier,
                                    _('no match found'))

    # A 20-byte value may be a binary node.
    if len(fileid) == 20:
        try:
            store.rev(fileid)
        except error.LookupError:
            pass
        else:
            return fileid

    # A 40-byte value may be a hex node.  Note: a lookup failure after
    # successful hex decoding propagates (only TypeError from bad hex is
    # swallowed), matching historical behavior.
    if len(fileid) == 40:
        try:
            binnode = bin(fileid)
            store.rev(binnode)
        except TypeError:
            pass
        else:
            return binnode

    # Finally, bytes that round-trip exactly through int() are treated
    # as a revision number.
    try:
        rev = int(fileid)

        if b'%d' % rev != fileid:
            raise ValueError

        try:
            return store.node(rev)
        except (IndexError, TypeError):
            pass
    except (ValueError, OverflowError):
        pass

    raise error.LookupError(fileid, identifier, _('no match found'))
214
215
def resolvestripinfo(minlinkrev, tiprev, headrevs, linkrevfn, parentrevsfn):
    """Resolve information needed to strip revisions.

    Finds the minimum revision number that must be stripped in order to
    strip ``minlinkrev``.

    Returns a 2-tuple of the minimum revision number to do that and a
    set of all revision numbers whose linkrevs would be broken by that
    strip.

    ``tiprev`` is the current tip-most revision (``len(store) - 1``).
    ``headrevs`` is an iterable of head revisions.
    ``linkrevfn`` maps a revision number to its linked revision.
    ``parentrevsfn`` maps a revision number to an iterable of its
    parent revision numbers.
    """
    brokenrevs = set()
    strippoint = tiprev + 1

    # Candidate revision -> its linkrev, plus the set of linkrevs at or
    # above the strip target that are still outstanding.
    pending = {}
    largelinkrevs = set()
    for head in headrevs:
        headlinkrev = linkrevfn(head)
        pending[head] = headlinkrev
        if headlinkrev >= minlinkrev:
            largelinkrevs.add(headlinkrev)

    # Walk down the revision graph starting at the heads.  Revisions
    # are topologically sorted by linkrev, so once every outstanding
    # linkrev is below minlinkrev no earlier revision can exceed it and
    # the walk can stop.
    while largelinkrevs:
        strippoint -= 1
        linkrev = pending.pop(strippoint)

        if linkrev < minlinkrev:
            brokenrevs.add(strippoint)
        else:
            largelinkrevs.remove(linkrev)

        for parent in parentrevsfn(strippoint):
            if parent != nullrev:
                parentlinkrev = linkrevfn(parent)
                pending[parent] = parentlinkrev
                if parentlinkrev >= minlinkrev:
                    largelinkrevs.add(parentlinkrev)

    return strippoint, brokenrevs
265
266
def emitrevisions(store, revs, resultcls, deltaparentfn=None, candeltafn=None,
                  rawsizefn=None, revdifffn=None, flagsfn=None,
                  sendfulltext=False,
                  revisiondata=False, assumehaveparentrevisions=False,
                  deltaprevious=False):
    """Generic implementation of ifiledata.emitrevisions().

    Emitting revision data is subtly complex. This function attempts to
    encapsulate all the logic for doing so in a backend-agnostic way.

    ``store``
       Object conforming to ``ifilestorage`` interface.

    ``revs``
       List of integer revision numbers whose data to emit.

    ``resultcls``
       A type implementing the ``irevisiondelta`` interface that will be
       constructed and returned.

    ``deltaparentfn`` (optional)
       Callable receiving a revision number and returning the revision
       number of a revision that the internal delta is stored against.
       This delta will be preferred over computing a new arbitrary delta.

       If not defined, a delta will always be computed from raw revision
       data.

    ``candeltafn`` (optional)
       Callable receiving a pair of revision numbers that returns a bool
       indicating whether a delta between them can be produced.

       If not defined, it is assumed that any two revisions can delta
       with each other.

    ``rawsizefn`` (optional)
       Callable receiving a revision number and returning the length of
       the ``store.revision(rev, raw=True)``.

       If not defined, ``len(store.revision(rev, raw=True))`` will be
       called.

    ``revdifffn`` (optional)
       Callable receiving a pair of revision numbers that returns a
       delta between them.

       If not defined, a delta will be computed by invoking mdiff code
       on ``store.revision()`` results.

       Defining this function allows a precomputed or stored delta to be
       used without having to compute one.

    ``flagsfn`` (optional)
       Callable receiving a revision number and returns the integer
       flags value for it. If not defined, flags value will be 0.

    ``sendfulltext``
       Whether to send fulltext revisions instead of deltas, if allowed.

    ``revisiondata``
    ``assumehaveparentrevisions``
    ``deltaprevious``
       See ``ifiledata.emitrevisions()`` interface documentation.
    """
    tonode = store.node

    prevrev = None
    if deltaprevious or assumehaveparentrevisions:
        prevrev = store.parentrevs(revs[0])[0]

    # Revisions already emitted in this group and therefore usable as
    # delta bases on the receiving side.
    available = set()

    for rev in revs:
        if rev == nullrev:
            continue

        node = tonode(rev)
        p1rev, p2rev = store.parentrevs(rev)

        # Without a delta-parent resolver, behave as if revisions are
        # stored as fulltexts.
        deltaparentrev = deltaparentfn(rev) if deltaparentfn else nullrev

        # Forced delta against previous mode.
        if deltaprevious:
            baserev = prevrev

        # We're instructed to send fulltext. Honor that.
        elif sendfulltext:
            baserev = nullrev

        # There is a delta in storage. We try to use that because it
        # amounts to effectively copying data from storage and is
        # therefore the fastest.
        elif deltaparentrev != nullrev:
            # Base revision was already emitted in this group. We can
            # always safely use the delta.
            if deltaparentrev in available:
                baserev = deltaparentrev

            # Base revision is a parent that hasn't been emitted already.
            # Use it if we can assume the receiver has the parent
            # revision.
            elif (assumehaveparentrevisions
                  and deltaparentrev in (p1rev, p2rev)):
                baserev = deltaparentrev

            # No guarantee the receiver has the delta parent. Send delta
            # against last revision (if possible), which in the common
            # case should be similar enough to this revision that the
            # delta is reasonable.
            elif prevrev is not None:
                baserev = prevrev
            else:
                baserev = nullrev

        # Storage has a fulltext revision.

        # Let's use the previous revision, which is as good a guess as
        # any.  There is definitely room to improve this logic.
        elif prevrev is not None:
            baserev = prevrev
        else:
            baserev = nullrev

        # But we can't actually use our chosen delta base for whatever
        # reason. Reset to fulltext.
        if baserev != nullrev and (candeltafn
                                   and not candeltafn(baserev, rev)):
            baserev = nullrev

        revision = None
        delta = None
        baserevisionsize = None

        if revisiondata:
            if store.iscensored(baserev) or store.iscensored(rev):
                try:
                    revision = store.revision(node, raw=True)
                except error.CensoredNodeError as e:
                    revision = e.tombstone

                if baserev != nullrev:
                    if rawsizefn:
                        baserevisionsize = rawsizefn(baserev)
                    else:
                        baserevisionsize = len(store.revision(baserev,
                                                              raw=True))

            elif baserev == nullrev and not deltaprevious:
                revision = store.revision(node, raw=True)
                available.add(rev)
            else:
                if revdifffn:
                    delta = revdifffn(baserev, rev)
                else:
                    delta = mdiff.textdiff(store.revision(baserev, raw=True),
                                           store.revision(rev, raw=True))

                available.add(rev)

        yield resultcls(
            node=node,
            p1node=tonode(p1rev),
            p2node=tonode(p2rev),
            basenode=tonode(baserev),
            flags=flagsfn(rev) if flagsfn else 0,
            baserevisionsize=baserevisionsize,
            revision=revision,
            delta=delta)

        prevrev = rev
General Comments 0
You need to be logged in to leave comments. Login now