emitrevision: also check the parents in the availability closure...
marmoute
r50681:0bda07f3 stable
@@ -1,557 +1,560 @@
1 # storageutil.py - Storage functionality agnostic of backend implementation.
1 # storageutil.py - Storage functionality agnostic of backend implementation.
2 #
2 #
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import re
9 import re
10 import struct
10 import struct
11
11
12 from ..i18n import _
12 from ..i18n import _
13 from ..node import (
13 from ..node import (
14 bin,
14 bin,
15 nullrev,
15 nullrev,
16 sha1nodeconstants,
16 sha1nodeconstants,
17 )
17 )
18 from .. import (
18 from .. import (
19 dagop,
19 dagop,
20 error,
20 error,
21 mdiff,
21 mdiff,
22 )
22 )
23 from ..interfaces import repository
23 from ..interfaces import repository
24 from ..revlogutils import sidedata as sidedatamod
24 from ..revlogutils import sidedata as sidedatamod
25 from ..utils import hashutil
25 from ..utils import hashutil
26
26
27 _nullhash = hashutil.sha1(sha1nodeconstants.nullid)
27 _nullhash = hashutil.sha1(sha1nodeconstants.nullid)
28
28
29 # revision data contains extra metadata not part of the official digest
29 # revision data contains extra metadata not part of the official digest
30 # Only used in changegroup >= v4.
30 # Only used in changegroup >= v4.
31 CG_FLAG_SIDEDATA = 1
31 CG_FLAG_SIDEDATA = 1
32
32
33
33
34 def hashrevisionsha1(text, p1, p2):
34 def hashrevisionsha1(text, p1, p2):
35 """Compute the SHA-1 for revision data and its parents.
35 """Compute the SHA-1 for revision data and its parents.
36
36
37 This hash combines both the current file contents and its history
37 This hash combines both the current file contents and its history
38 in a manner that makes it easy to distinguish nodes with the same
38 in a manner that makes it easy to distinguish nodes with the same
39 content in the revision graph.
39 content in the revision graph.
40 """
40 """
41 # As of now, if one of the parent nodes is null, p2 is null
41 # As of now, if one of the parent nodes is null, p2 is null
42 if p2 == sha1nodeconstants.nullid:
42 if p2 == sha1nodeconstants.nullid:
43 # deep copy of a hash is faster than creating one
43 # deep copy of a hash is faster than creating one
44 s = _nullhash.copy()
44 s = _nullhash.copy()
45 s.update(p1)
45 s.update(p1)
46 else:
46 else:
47 # none of the parent nodes are nullid
47 # none of the parent nodes are nullid
48 if p1 < p2:
48 if p1 < p2:
49 a = p1
49 a = p1
50 b = p2
50 b = p2
51 else:
51 else:
52 a = p2
52 a = p2
53 b = p1
53 b = p1
54 s = hashutil.sha1(a)
54 s = hashutil.sha1(a)
55 s.update(b)
55 s.update(b)
56 s.update(text)
56 s.update(text)
57 return s.digest()
57 return s.digest()
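
To make the hashing rule above concrete, here is a small standalone sketch using only hashlib; the 20-byte null node is hard-coded as a stand-in for sha1nodeconstants.nullid, and toy_hashrevisionsha1 is an illustrative name, not part of the module.

import hashlib

NULLID = b"\0" * 20  # assumed stand-in for sha1nodeconstants.nullid

def toy_hashrevisionsha1(text, p1, p2):
    # Parents are hashed in sorted order, then the revision text, mirroring
    # the rules documented above (a null parent sorts first automatically).
    if p2 == NULLID:
        s = hashlib.sha1(NULLID)
        s.update(p1)
    else:
        a, b = sorted((p1, p2))
        s = hashlib.sha1(a)
        s.update(b)
    s.update(text)
    return s.digest()

# Swapping non-null parents does not change the node; changing the text does.
n1 = toy_hashrevisionsha1(b"data", b"\x01" * 20, b"\x02" * 20)
n2 = toy_hashrevisionsha1(b"data", b"\x02" * 20, b"\x01" * 20)
assert n1 == n2
assert toy_hashrevisionsha1(b"other", b"\x01" * 20, b"\x02" * 20) != n1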
58
58
59
59
60 METADATA_RE = re.compile(b'\x01\n')
60 METADATA_RE = re.compile(b'\x01\n')
61
61
62
62
63 def parsemeta(text):
63 def parsemeta(text):
64 """Parse metadata header from revision data.
64 """Parse metadata header from revision data.
65
65
66 Returns a 2-tuple of (metadata, offset), where both can be None if there
66 Returns a 2-tuple of (metadata, offset), where both can be None if there
67 is no metadata.
67 is no metadata.
68 """
68 """
69 # text can be buffer, so we can't use .startswith or .index
69 # text can be buffer, so we can't use .startswith or .index
70 if text[:2] != b'\x01\n':
70 if text[:2] != b'\x01\n':
71 return None, None
71 return None, None
72 s = METADATA_RE.search(text, 2).start()
72 s = METADATA_RE.search(text, 2).start()
73 mtext = text[2:s]
73 mtext = text[2:s]
74 meta = {}
74 meta = {}
75 for l in mtext.splitlines():
75 for l in mtext.splitlines():
76 k, v = l.split(b': ', 1)
76 k, v = l.split(b': ', 1)
77 meta[k] = v
77 meta[k] = v
78 return meta, s + 2
78 return meta, s + 2
79
79
80
80
81 def packmeta(meta, text):
81 def packmeta(meta, text):
82 """Add metadata to fulltext to produce revision text."""
82 """Add metadata to fulltext to produce revision text."""
83 keys = sorted(meta)
83 keys = sorted(meta)
84 metatext = b''.join(b'%s: %s\n' % (k, meta[k]) for k in keys)
84 metatext = b''.join(b'%s: %s\n' % (k, meta[k]) for k in keys)
85 return b'\x01\n%s\x01\n%s' % (metatext, text)
85 return b'\x01\n%s\x01\n%s' % (metatext, text)
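
The two helpers above define the "\x01\n" metadata envelope. A minimal standalone sketch of the same format (toy_packmeta and toy_parsemeta are illustrative names, not module APIs) shows the round trip:

import re

METADATA_RE = re.compile(b'\x01\n')

def toy_packmeta(meta, text):
    # Metadata keys are emitted in sorted order, one "key: value\n" per
    # entry, wrapped in a pair of "\x01\n" markers before the revision text.
    metatext = b''.join(b'%s: %s\n' % (k, meta[k]) for k in sorted(meta))
    return b'\x01\n%s\x01\n%s' % (metatext, text)

def toy_parsemeta(text):
    # Returns (metadata dict, offset of the real data) or (None, None).
    if text[:2] != b'\x01\n':
        return None, None
    s = METADATA_RE.search(text, 2).start()
    meta = dict(l.split(b': ', 1) for l in text[2:s].splitlines())
    return meta, s + 2

packed = toy_packmeta({b'copy': b'a.txt', b'copyrev': b'0' * 40}, b'contents\n')
meta, offset = toy_parsemeta(packed)
assert meta[b'copy'] == b'a.txt'
assert packed[offset:] == b'contents\n'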
86
86
87
87
88 def iscensoredtext(text):
88 def iscensoredtext(text):
89 meta = parsemeta(text)[0]
89 meta = parsemeta(text)[0]
90 return meta and b'censored' in meta
90 return meta and b'censored' in meta
91
91
92
92
93 def filtermetadata(text):
93 def filtermetadata(text):
94 """Extract just the revision data from source text.
94 """Extract just the revision data from source text.
95
95
96 Returns ``text`` unless it has a metadata header, in which case we return
96 Returns ``text`` unless it has a metadata header, in which case we return
97 a new buffer without the metadata.
97 a new buffer without the metadata.
98 """
98 """
99 if not text.startswith(b'\x01\n'):
99 if not text.startswith(b'\x01\n'):
100 return text
100 return text
101
101
102 offset = text.index(b'\x01\n', 2)
102 offset = text.index(b'\x01\n', 2)
103 return text[offset + 2 :]
103 return text[offset + 2 :]
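
A quick standalone illustration of this stripping behaviour (toy_filtermetadata is hypothetical); note how file data that itself begins with "\x01\n" is protected by an empty metadata header, which is what filedataequivalent() further down relies on:

def toy_filtermetadata(text):
    # Pass plain revision text through unchanged; otherwise skip past the
    # second "\x01\n" marker that closes the metadata header.
    if not text.startswith(b'\x01\n'):
        return text
    offset = text.index(b'\x01\n', 2)
    return text[offset + 2:]

assert toy_filtermetadata(b'plain text') == b'plain text'
assert toy_filtermetadata(b'\x01\ncopy: a\n\x01\nbody') == b'body'
# An empty metadata header shields file data that starts with the marker.
assert toy_filtermetadata(b'\x01\n\x01\n\x01\nstarts with marker') == b'\x01\nstarts with marker'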
104
104
105
105
106 def filerevisioncopied(store, node):
106 def filerevisioncopied(store, node):
107 """Resolve file revision copy metadata.
107 """Resolve file revision copy metadata.
108
108
109 Returns ``False`` if the file has no copy metadata. Otherwise a
109 Returns ``False`` if the file has no copy metadata. Otherwise a
110 2-tuple of the source filename and node.
110 2-tuple of the source filename and node.
111 """
111 """
112 if store.parents(node)[0] != sha1nodeconstants.nullid:
112 if store.parents(node)[0] != sha1nodeconstants.nullid:
113 # When creating a copy or move we set filelog parents to null,
113 # When creating a copy or move we set filelog parents to null,
114 # because contents are probably unrelated and making a delta
114 # because contents are probably unrelated and making a delta
115 # would not be useful.
115 # would not be useful.
116 # Conversely, if filelog p1 is non-null we know
116 # Conversely, if filelog p1 is non-null we know
117 # there is no copy metadata.
117 # there is no copy metadata.
118 # In the presence of merges, this reasoning becomes invalid
118 # In the presence of merges, this reasoning becomes invalid
119 # if we reorder parents. See tests/test-issue6528.t.
119 # if we reorder parents. See tests/test-issue6528.t.
120 return False
120 return False
121
121
122 meta = parsemeta(store.revision(node))[0]
122 meta = parsemeta(store.revision(node))[0]
123
123
124 # copy and copyrev occur in pairs. In rare cases due to old bugs,
124 # copy and copyrev occur in pairs. In rare cases due to old bugs,
125 # one can occur without the other. So ensure both are present to flag
125 # one can occur without the other. So ensure both are present to flag
126 # as a copy.
126 # as a copy.
127 if meta and b'copy' in meta and b'copyrev' in meta:
127 if meta and b'copy' in meta and b'copyrev' in meta:
128 return meta[b'copy'], bin(meta[b'copyrev'])
128 return meta[b'copy'], bin(meta[b'copyrev'])
129
129
130 return False
130 return False
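
The decision above boils down to two gates. A minimal sketch with the metadata already parsed (toy_filerevisioncopied and the hard-coded null id are illustrative, not module APIs):

NULLID = b"\0" * 20  # assumed stand-in for sha1nodeconstants.nullid

def toy_filerevisioncopied(p1, meta):
    # Mirrors the two gates above: a non-null filelog p1 means "not a copy"
    # without even looking at metadata, and both 'copy' and 'copyrev' must
    # be present before the revision is flagged as copied.
    if p1 != NULLID:
        return False
    if meta and b'copy' in meta and b'copyrev' in meta:
        return meta[b'copy'], bytes.fromhex(meta[b'copyrev'].decode())
    return False

assert toy_filerevisioncopied(b'\x11' * 20, {b'copy': b'a', b'copyrev': b'00' * 20}) is False
assert toy_filerevisioncopied(NULLID, {b'copy': b'a'}) is False
assert toy_filerevisioncopied(NULLID, {b'copy': b'a', b'copyrev': b'22' * 20}) == (b'a', b'\x22' * 20)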
131
131
132
132
133 def filedataequivalent(store, node, filedata):
133 def filedataequivalent(store, node, filedata):
134 """Determines whether file data is equivalent to a stored node.
134 """Determines whether file data is equivalent to a stored node.
135
135
136 Returns True if the passed file data would hash to the same value
136 Returns True if the passed file data would hash to the same value
137 as a stored revision and False otherwise.
137 as a stored revision and False otherwise.
138
138
139 When a stored revision is censored, filedata must be empty to have
139 When a stored revision is censored, filedata must be empty to have
140 equivalence.
140 equivalence.
141
141
142 When a stored revision has copy metadata, it is ignored as part
142 When a stored revision has copy metadata, it is ignored as part
143 of the compare.
143 of the compare.
144 """
144 """
145
145
146 if filedata.startswith(b'\x01\n'):
146 if filedata.startswith(b'\x01\n'):
147 revisiontext = b'\x01\n\x01\n' + filedata
147 revisiontext = b'\x01\n\x01\n' + filedata
148 else:
148 else:
149 revisiontext = filedata
149 revisiontext = filedata
150
150
151 p1, p2 = store.parents(node)
151 p1, p2 = store.parents(node)
152
152
153 computednode = hashrevisionsha1(revisiontext, p1, p2)
153 computednode = hashrevisionsha1(revisiontext, p1, p2)
154
154
155 if computednode == node:
155 if computednode == node:
156 return True
156 return True
157
157
158 # Censored files compare against the empty file.
158 # Censored files compare against the empty file.
159 if store.iscensored(store.rev(node)):
159 if store.iscensored(store.rev(node)):
160 return filedata == b''
160 return filedata == b''
161
161
162 # Renaming a file produces a different hash, even if the data
162 # Renaming a file produces a different hash, even if the data
163 # remains unchanged. Check if that's the case.
163 # remains unchanged. Check if that's the case.
164 if store.renamed(node):
164 if store.renamed(node):
165 return store.read(node) == filedata
165 return store.read(node) == filedata
166
166
167 return False
167 return False
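
A standalone sketch of the equivalence check, omitting the censored and renamed fallbacks; toy_node re-implements the parent-then-text hashing described for hashrevisionsha1() above, and all names are illustrative:

import hashlib

NULLID = b"\0" * 20  # assumed stand-in for sha1nodeconstants.nullid

def toy_node(text, p1=NULLID, p2=NULLID):
    # Same parent-then-text hashing rule as hashrevisionsha1() above.
    a, b = sorted((p1, p2))
    s = hashlib.sha1(a)
    s.update(b)
    s.update(text)
    return s.digest()

def toy_filedataequivalent(storednode, storedp1, storedp2, filedata):
    # File data starting with the metadata marker is wrapped in an empty
    # metadata header, exactly as the stored revision text would be.
    if filedata.startswith(b'\x01\n'):
        revisiontext = b'\x01\n\x01\n' + filedata
    else:
        revisiontext = filedata
    return toy_node(revisiontext, storedp1, storedp2) == storednode

data = b'\x01\nnot metadata, just file content\n'
stored = toy_node(b'\x01\n\x01\n' + data)  # how such data would be stored
assert toy_filedataequivalent(stored, NULLID, NULLID, data)
assert not toy_filedataequivalent(stored, NULLID, NULLID, b'other data\n')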
168
168
169
169
170 def iterrevs(storelen, start=0, stop=None):
170 def iterrevs(storelen, start=0, stop=None):
171 """Iterate over revision numbers in a store."""
171 """Iterate over revision numbers in a store."""
172 step = 1
172 step = 1
173
173
174 if stop is not None:
174 if stop is not None:
175 if start > stop:
175 if start > stop:
176 step = -1
176 step = -1
177 stop += step
177 stop += step
178 if stop > storelen:
178 if stop > storelen:
179 stop = storelen
179 stop = storelen
180 else:
180 else:
181 stop = storelen
181 stop = storelen
182
182
183 return range(start, stop, step)
183 return range(start, stop, step)
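
A standalone copy of this helper (toy_iterrevs, illustrative) makes the inclusive ``stop``, reverse walking, and clamping behaviour explicit:

def toy_iterrevs(storelen, start=0, stop=None):
    # Mirrors iterrevs(): walk forward by default, backwards when
    # start > stop, and never run past the end of the store.
    step = 1
    if stop is not None:
        if start > stop:
            step = -1
        stop += step
        if stop > storelen:
            stop = storelen
    else:
        stop = storelen
    return range(start, stop, step)

assert list(toy_iterrevs(5)) == [0, 1, 2, 3, 4]
assert list(toy_iterrevs(5, start=1, stop=3)) == [1, 2, 3]   # stop is inclusive
assert list(toy_iterrevs(5, start=3, stop=1)) == [3, 2, 1]   # reverse walk
assert list(toy_iterrevs(5, start=2, stop=99)) == [2, 3, 4]  # clamped to the store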
184
184
185
185
186 def fileidlookup(store, fileid, identifier):
186 def fileidlookup(store, fileid, identifier):
187 """Resolve the file node for a value.
187 """Resolve the file node for a value.
188
188
189 ``store`` is an object implementing the ``ifileindex`` interface.
189 ``store`` is an object implementing the ``ifileindex`` interface.
190
190
191 ``fileid`` can be:
191 ``fileid`` can be:
192
192
193 * A 20 or 32 byte binary node.
193 * A 20 or 32 byte binary node.
194 * An integer revision number
194 * An integer revision number
195 * A 40 or 64 byte hex node.
195 * A 40 or 64 byte hex node.
196 * A bytes that can be parsed as an integer representing a revision number.
196 * A bytes that can be parsed as an integer representing a revision number.
197
197
198 ``identifier`` is used to populate ``error.LookupError`` with an identifier
198 ``identifier`` is used to populate ``error.LookupError`` with an identifier
199 for the store.
199 for the store.
200
200
201 Raises ``error.LookupError`` on failure.
201 Raises ``error.LookupError`` on failure.
202 """
202 """
203 if isinstance(fileid, int):
203 if isinstance(fileid, int):
204 try:
204 try:
205 return store.node(fileid)
205 return store.node(fileid)
206 except IndexError:
206 except IndexError:
207 raise error.LookupError(
207 raise error.LookupError(
208 b'%d' % fileid, identifier, _(b'no match found')
208 b'%d' % fileid, identifier, _(b'no match found')
209 )
209 )
210
210
211 if len(fileid) in (20, 32):
211 if len(fileid) in (20, 32):
212 try:
212 try:
213 store.rev(fileid)
213 store.rev(fileid)
214 return fileid
214 return fileid
215 except error.LookupError:
215 except error.LookupError:
216 pass
216 pass
217
217
218 if len(fileid) in (40, 64):
218 if len(fileid) in (40, 64):
219 try:
219 try:
220 rawnode = bin(fileid)
220 rawnode = bin(fileid)
221 store.rev(rawnode)
221 store.rev(rawnode)
222 return rawnode
222 return rawnode
223 except TypeError:
223 except TypeError:
224 pass
224 pass
225
225
226 try:
226 try:
227 rev = int(fileid)
227 rev = int(fileid)
228
228
229 if b'%d' % rev != fileid:
229 if b'%d' % rev != fileid:
230 raise ValueError
230 raise ValueError
231
231
232 try:
232 try:
233 return store.node(rev)
233 return store.node(rev)
234 except (IndexError, TypeError):
234 except (IndexError, TypeError):
235 pass
235 pass
236 except (ValueError, OverflowError):
236 except (ValueError, OverflowError):
237 pass
237 pass
238
238
239 raise error.LookupError(fileid, identifier, _(b'no match found'))
239 raise error.LookupError(fileid, identifier, _(b'no match found'))
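
A simplified sketch of the resolution order with a stub store; FakeStore is hypothetical, and the fall-through error handling of the real helper is omitted:

class FakeStore:
    """Hypothetical two-revision store exposing just node() and rev()."""

    def __init__(self, nodes):
        self._nodes = nodes

    def node(self, rev):
        return self._nodes[rev]

    def rev(self, node):
        return self._nodes.index(node)

def toy_fileidlookup(store, fileid):
    # Resolution order mirrors fileidlookup(): integer revision, binary
    # node, hex node, then a bytes string holding a decimal revision number
    # (the real helper falls through on lookup errors; this sketch assumes
    # well-formed input).
    if isinstance(fileid, int):
        return store.node(fileid)
    if len(fileid) in (20, 32):
        store.rev(fileid)  # raises if unknown
        return fileid
    if len(fileid) in (40, 64):
        rawnode = bytes.fromhex(fileid.decode())
        store.rev(rawnode)
        return rawnode
    return store.node(int(fileid))

store = FakeStore([b'\xaa' * 20, b'\xbb' * 20])
assert toy_fileidlookup(store, 1) == b'\xbb' * 20
assert toy_fileidlookup(store, b'\xaa' * 20) == b'\xaa' * 20
assert toy_fileidlookup(store, b'bb' * 20) == b'\xbb' * 20
assert toy_fileidlookup(store, b'0') == b'\xaa' * 20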
240
240
241
241
242 def resolvestripinfo(minlinkrev, tiprev, headrevs, linkrevfn, parentrevsfn):
242 def resolvestripinfo(minlinkrev, tiprev, headrevs, linkrevfn, parentrevsfn):
243 """Resolve information needed to strip revisions.
243 """Resolve information needed to strip revisions.
244
244
245 Finds the minimum revision number that must be stripped in order to
245 Finds the minimum revision number that must be stripped in order to
246 strip ``minlinkrev``.
246 strip ``minlinkrev``.
247
247
248 Returns a 2-tuple of the minimum revision number to do that and a set
248 Returns a 2-tuple of the minimum revision number to do that and a set
249 of all revision numbers that have linkrevs that would be broken
249 of all revision numbers that have linkrevs that would be broken
250 by that strip.
250 by that strip.
251
251
252 ``tiprev`` is the current tip-most revision. It is ``len(store) - 1``.
252 ``tiprev`` is the current tip-most revision. It is ``len(store) - 1``.
253 ``headrevs`` is an iterable of head revisions.
253 ``headrevs`` is an iterable of head revisions.
254 ``linkrevfn`` is a callable that receives a revision and returns a linked
254 ``linkrevfn`` is a callable that receives a revision and returns a linked
255 revision.
255 revision.
256 ``parentrevsfn`` is a callable that receives a revision number and returns
256 ``parentrevsfn`` is a callable that receives a revision number and returns
257 an iterable of its parent revision numbers.
257 an iterable of its parent revision numbers.
258 """
258 """
259 brokenrevs = set()
259 brokenrevs = set()
260 strippoint = tiprev + 1
260 strippoint = tiprev + 1
261
261
262 heads = {}
262 heads = {}
263 futurelargelinkrevs = set()
263 futurelargelinkrevs = set()
264 for head in headrevs:
264 for head in headrevs:
265 headlinkrev = linkrevfn(head)
265 headlinkrev = linkrevfn(head)
266 heads[head] = headlinkrev
266 heads[head] = headlinkrev
267 if headlinkrev >= minlinkrev:
267 if headlinkrev >= minlinkrev:
268 futurelargelinkrevs.add(headlinkrev)
268 futurelargelinkrevs.add(headlinkrev)
269
269
270 # This algorithm involves walking down the rev graph, starting at the
270 # This algorithm involves walking down the rev graph, starting at the
271 # heads. Since the revs are topologically sorted according to linkrev,
271 # heads. Since the revs are topologically sorted according to linkrev,
272 # once all head linkrevs are below the minlink, we know there are
272 # once all head linkrevs are below the minlink, we know there are
273 # no more revs that could have a linkrev greater than minlink.
273 # no more revs that could have a linkrev greater than minlink.
274 # So we can stop walking.
274 # So we can stop walking.
275 while futurelargelinkrevs:
275 while futurelargelinkrevs:
276 strippoint -= 1
276 strippoint -= 1
277 linkrev = heads.pop(strippoint)
277 linkrev = heads.pop(strippoint)
278
278
279 if linkrev < minlinkrev:
279 if linkrev < minlinkrev:
280 brokenrevs.add(strippoint)
280 brokenrevs.add(strippoint)
281 else:
281 else:
282 futurelargelinkrevs.remove(linkrev)
282 futurelargelinkrevs.remove(linkrev)
283
283
284 for p in parentrevsfn(strippoint):
284 for p in parentrevsfn(strippoint):
285 if p != nullrev:
285 if p != nullrev:
286 plinkrev = linkrevfn(p)
286 plinkrev = linkrevfn(p)
287 heads[p] = plinkrev
287 heads[p] = plinkrev
288 if plinkrev >= minlinkrev:
288 if plinkrev >= minlinkrev:
289 futurelargelinkrevs.add(plinkrev)
289 futurelargelinkrevs.add(plinkrev)
290
290
291 return strippoint, brokenrevs
291 return strippoint, brokenrevs
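
To see what the walk computes, here is the same algorithm run standalone on a tiny hand-built history with two heads (nullrev is represented as -1; the revisions and linkrevs are made up for illustration):

def toy_resolvestripinfo(minlinkrev, tiprev, headrevs, linkrevfn, parentrevsfn):
    # Same walk as above: pop revisions from the tip downwards, tracking
    # which linkrevs >= minlinkrev are still ahead of the walk.
    brokenrevs = set()
    strippoint = tiprev + 1
    heads = {}
    futurelargelinkrevs = set()
    for head in headrevs:
        headlinkrev = linkrevfn(head)
        heads[head] = headlinkrev
        if headlinkrev >= minlinkrev:
            futurelargelinkrevs.add(headlinkrev)
    while futurelargelinkrevs:
        strippoint -= 1
        linkrev = heads.pop(strippoint)
        if linkrev < minlinkrev:
            brokenrevs.add(strippoint)
        else:
            futurelargelinkrevs.remove(linkrev)
        for p in parentrevsfn(strippoint):
            if p != -1:  # -1 plays the role of nullrev
                plinkrev = linkrevfn(p)
                heads[p] = plinkrev
                if plinkrev >= minlinkrev:
                    futurelargelinkrevs.add(plinkrev)
    return strippoint, brokenrevs

# revs 1 and 2 are both heads on top of rev 0; rev 2 was introduced by an
# earlier changeset (linkrev 1) than rev 1 (linkrev 3).
linkrevs = {0: 0, 1: 3, 2: 1}
parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1)}

strippoint, broken = toy_resolvestripinfo(
    2, 2, [1, 2], linkrevs.__getitem__, parents.__getitem__
)
assert strippoint == 1   # stripping linkrev 2 requires stripping rev 1
assert broken == {2}     # rev 2 is stripped too although its linkrev is below minlink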
292
292
293
293
294 def emitrevisions(
294 def emitrevisions(
295 store,
295 store,
296 nodes,
296 nodes,
297 nodesorder,
297 nodesorder,
298 resultcls,
298 resultcls,
299 deltaparentfn=None,
299 deltaparentfn=None,
300 candeltafn=None,
300 candeltafn=None,
301 rawsizefn=None,
301 rawsizefn=None,
302 revdifffn=None,
302 revdifffn=None,
303 flagsfn=None,
303 flagsfn=None,
304 deltamode=repository.CG_DELTAMODE_STD,
304 deltamode=repository.CG_DELTAMODE_STD,
305 revisiondata=False,
305 revisiondata=False,
306 assumehaveparentrevisions=False,
306 assumehaveparentrevisions=False,
307 sidedata_helpers=None,
307 sidedata_helpers=None,
308 ):
308 ):
309 """Generic implementation of ifiledata.emitrevisions().
309 """Generic implementation of ifiledata.emitrevisions().
310
310
311 Emitting revision data is subtly complex. This function attempts to
311 Emitting revision data is subtly complex. This function attempts to
312 encapsulate all the logic for doing so in a backend-agnostic way.
312 encapsulate all the logic for doing so in a backend-agnostic way.
313
313
314 ``store``
314 ``store``
315 Object conforming to ``ifilestorage`` interface.
315 Object conforming to ``ifilestorage`` interface.
316
316
317 ``nodes``
317 ``nodes``
318 List of revision nodes whose data to emit.
318 List of revision nodes whose data to emit.
319
319
320 ``resultcls``
320 ``resultcls``
321 A type implementing the ``irevisiondelta`` interface that will be
321 A type implementing the ``irevisiondelta`` interface that will be
322 constructed and returned.
322 constructed and returned.
323
323
324 ``deltaparentfn`` (optional)
324 ``deltaparentfn`` (optional)
325 Callable receiving a revision number and returning the revision number
325 Callable receiving a revision number and returning the revision number
326 of a revision that the internal delta is stored against. This delta
326 of a revision that the internal delta is stored against. This delta
327 will be preferred over computing a new arbitrary delta.
327 will be preferred over computing a new arbitrary delta.
328
328
329 If not defined, a delta will always be computed from raw revision
329 If not defined, a delta will always be computed from raw revision
330 data.
330 data.
331
331
332 ``candeltafn`` (optional)
332 ``candeltafn`` (optional)
333 Callable receiving a pair of revision numbers that returns a bool
333 Callable receiving a pair of revision numbers that returns a bool
334 indicating whether a delta between them can be produced.
334 indicating whether a delta between them can be produced.
335
335
336 If not defined, it is assumed that any two revisions can delta with
336 If not defined, it is assumed that any two revisions can delta with
337 each other.
337 each other.
338
338
339 ``rawsizefn`` (optional)
339 ``rawsizefn`` (optional)
340 Callable receiving a revision number and returning the length of the
340 Callable receiving a revision number and returning the length of the
341 ``store.rawdata(rev)``.
341 ``store.rawdata(rev)``.
342
342
343 If not defined, ``len(store.rawdata(rev))`` will be called.
343 If not defined, ``len(store.rawdata(rev))`` will be called.
344
344
345 ``revdifffn`` (optional)
345 ``revdifffn`` (optional)
346 Callable receiving a pair of revision numbers that returns a delta
346 Callable receiving a pair of revision numbers that returns a delta
347 between them.
347 between them.
348
348
349 If not defined, a delta will be computed by invoking mdiff code
349 If not defined, a delta will be computed by invoking mdiff code
350 on ``store.revision()`` results.
350 on ``store.revision()`` results.
351
351
352 Defining this function allows a precomputed or stored delta to be
352 Defining this function allows a precomputed or stored delta to be
353 used without having to compute one.
353 used without having to compute one.
354
354
355 ``flagsfn`` (optional)
355 ``flagsfn`` (optional)
356 Callable receiving a revision number and returns the integer flags
356 Callable receiving a revision number and returns the integer flags
357 value for it. If not defined, flags value will be 0.
357 value for it. If not defined, flags value will be 0.
358
358
359 ``deltamode``
359 ``deltamode``
360 constraint on the delta to be sent:
360 constraint on the delta to be sent:
361 * CG_DELTAMODE_STD - normal mode, try to reuse storage deltas,
361 * CG_DELTAMODE_STD - normal mode, try to reuse storage deltas,
362 * CG_DELTAMODE_PREV - only delta against "prev",
362 * CG_DELTAMODE_PREV - only delta against "prev",
363 * CG_DELTAMODE_FULL - only issue full snapshot.
363 * CG_DELTAMODE_FULL - only issue full snapshot.
364
364
365 Whether to send fulltext revisions instead of deltas, if allowed.
365 Whether to send fulltext revisions instead of deltas, if allowed.
366
366
367 ``nodesorder``
367 ``nodesorder``
368 ``revisiondata``
368 ``revisiondata``
369 ``assumehaveparentrevisions``
369 ``assumehaveparentrevisions``
370 ``sidedata_helpers`` (optional)
370 ``sidedata_helpers`` (optional)
371 If not None, means that sidedata should be included.
371 If not None, means that sidedata should be included.
372 See `revlogutils.sidedata.get_sidedata_helpers`.
372 See `revlogutils.sidedata.get_sidedata_helpers`.
373 """
373 """
374
374
375 fnode = store.node
375 fnode = store.node
376 frev = store.rev
376 frev = store.rev
377
377
378 if nodesorder == b'nodes':
378 if nodesorder == b'nodes':
379 revs = [frev(n) for n in nodes]
379 revs = [frev(n) for n in nodes]
380 elif nodesorder == b'linear':
380 elif nodesorder == b'linear':
381 revs = {frev(n) for n in nodes}
381 revs = {frev(n) for n in nodes}
382 revs = dagop.linearize(revs, store.parentrevs)
382 revs = dagop.linearize(revs, store.parentrevs)
383 else: # storage and default
383 else: # storage and default
384 revs = sorted(frev(n) for n in nodes)
384 revs = sorted(frev(n) for n in nodes)
385
385
386 prevrev = None
386 prevrev = None
387
387
388 if deltamode == repository.CG_DELTAMODE_PREV or assumehaveparentrevisions:
388 if deltamode == repository.CG_DELTAMODE_PREV or assumehaveparentrevisions:
389 prevrev = store.parentrevs(revs[0])[0]
389 prevrev = store.parentrevs(revs[0])[0]
390
390
391 # Set of revs available to delta against.
391 # Set of revs available to delta against.
392 available = set()
392 available = set()
393 parents = []
393
394
394 def is_usable_base(rev):
395 def is_usable_base(rev):
395 return rev != nullrev and rev in available
396 """Is a delta against this revision usable over the wire"""
397 if rev == nullrev:
398 return False
399 # Base revision was already emitted in this group.
400 if rev in available:
401 return True
402 # Base revision is a parent that hasn't been emitted already.
403 if assumehaveparentrevisions and rev in parents:
404 return True
405 return False
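
This is the change made by this revision: the closure now also accepts a parent of the revision being emitted when the receiver is assumed to have parent revisions, logic that previously lived in a separate branch of the delta-base cascade below. A standalone sketch of the closure's behaviour (make_is_usable_base is an illustrative wrapper, not module code):

def make_is_usable_base(available, parents, assumehaveparentrevisions):
    # `available` is the set of revisions already emitted in this group,
    # `parents` the (mutable) parents of the revision currently emitted.
    def is_usable_base(rev):
        if rev == -1:  # nullrev
            return False
        if rev in available:
            return True
        if assumehaveparentrevisions and rev in parents:
            return True
        return False
    return is_usable_base

available, parents = {2, 3}, [5, -1]
usable = make_is_usable_base(available, parents, assumehaveparentrevisions=True)
assert usable(3)       # already emitted in this group
assert usable(5)       # parent the receiver is assumed to have
assert not usable(4)   # neither emitted nor a parent
assert not usable(-1)  # the null revision is never a delta base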
396
406
397 for rev in revs:
407 for rev in revs:
398 if rev == nullrev:
408 if rev == nullrev:
399 continue
409 continue
400
410
401 node = fnode(rev)
411 node = fnode(rev)
402 p1rev, p2rev = store.parentrevs(rev)
412 parents[:] = p1rev, p2rev = store.parentrevs(rev)
403
413
404 if deltaparentfn:
414 if deltaparentfn:
405 deltaparentrev = deltaparentfn(rev)
415 deltaparentrev = deltaparentfn(rev)
406 else:
416 else:
407 deltaparentrev = nullrev
417 deltaparentrev = nullrev
408
418
409 # Forced delta against previous mode.
419 # Forced delta against previous mode.
410 if deltamode == repository.CG_DELTAMODE_PREV:
420 if deltamode == repository.CG_DELTAMODE_PREV:
411 baserev = prevrev
421 baserev = prevrev
412
422
413 # We're instructed to send fulltext. Honor that.
423 # We're instructed to send fulltext. Honor that.
414 elif deltamode == repository.CG_DELTAMODE_FULL:
424 elif deltamode == repository.CG_DELTAMODE_FULL:
415 baserev = nullrev
425 baserev = nullrev
416 # We're instructed to use p1. Honor that
426 # We're instructed to use p1. Honor that
417 elif deltamode == repository.CG_DELTAMODE_P1:
427 elif deltamode == repository.CG_DELTAMODE_P1:
418 baserev = p1rev
428 baserev = p1rev
419
429
420 # There is a delta in storage. We try to use that because it
430 # There is a delta in storage. We try to use that because it
421 # amounts to effectively copying data from storage and is
431 # amounts to effectively copying data from storage and is
422 # therefore the fastest.
432 # therefore the fastest.
423 elif deltaparentrev != nullrev:
433 elif deltaparentrev != nullrev:
424 # Base revision was already emitted in this group. We can
434 # If the stored delta works, let us use it!
425 # always safely use the delta.
426 if is_usable_base(deltaparentrev):
435 if is_usable_base(deltaparentrev):
427 baserev = deltaparentrev
436 baserev = deltaparentrev
428
429 # Base revision is a parent that hasn't been emitted already.
430 # Use it if we can assume the receiver has the parent revision.
431 elif assumehaveparentrevisions and deltaparentrev in (p1rev, p2rev):
432 baserev = deltaparentrev
433
434 # No guarantee the receiver has the delta parent. Send delta
437 # No guarantee the receiver has the delta parent. Send delta
435 # against last revision (if possible), which in the common case
438 # against last revision (if possible), which in the common case
436 # should be similar enough to this revision that the delta is
439 # should be similar enough to this revision that the delta is
437 # reasonable.
440 # reasonable.
438 elif prevrev is not None:
441 elif prevrev is not None:
439 baserev = prevrev
442 baserev = prevrev
440 else:
443 else:
441 baserev = nullrev
444 baserev = nullrev
442
445
443 # Storage has a fulltext revision.
446 # Storage has a fulltext revision.
444
447
445 # Let's use the previous revision, which is as good a guess as any.
448 # Let's use the previous revision, which is as good a guess as any.
446 # There is definitely room to improve this logic.
449 # There is definitely room to improve this logic.
447 elif prevrev is not None:
450 elif prevrev is not None:
448 baserev = prevrev
451 baserev = prevrev
449 else:
452 else:
450 baserev = nullrev
453 baserev = nullrev
451
454
452 # But we can't actually use our chosen delta base for whatever
455 # But we can't actually use our chosen delta base for whatever
453 # reason. Reset to fulltext.
456 # reason. Reset to fulltext.
454 if baserev != nullrev and (candeltafn and not candeltafn(baserev, rev)):
457 if baserev != nullrev and (candeltafn and not candeltafn(baserev, rev)):
455 baserev = nullrev
458 baserev = nullrev
456
459
457 revision = None
460 revision = None
458 delta = None
461 delta = None
459 baserevisionsize = None
462 baserevisionsize = None
460
463
461 if revisiondata:
464 if revisiondata:
462 if store.iscensored(baserev) or store.iscensored(rev):
465 if store.iscensored(baserev) or store.iscensored(rev):
463 try:
466 try:
464 revision = store.rawdata(node)
467 revision = store.rawdata(node)
465 except error.CensoredNodeError as e:
468 except error.CensoredNodeError as e:
466 revision = e.tombstone
469 revision = e.tombstone
467
470
468 if baserev != nullrev:
471 if baserev != nullrev:
469 if rawsizefn:
472 if rawsizefn:
470 baserevisionsize = rawsizefn(baserev)
473 baserevisionsize = rawsizefn(baserev)
471 else:
474 else:
472 baserevisionsize = len(store.rawdata(baserev))
475 baserevisionsize = len(store.rawdata(baserev))
473
476
474 elif (
477 elif (
475 baserev == nullrev and deltamode != repository.CG_DELTAMODE_PREV
478 baserev == nullrev and deltamode != repository.CG_DELTAMODE_PREV
476 ):
479 ):
477 revision = store.rawdata(node)
480 revision = store.rawdata(node)
478 available.add(rev)
481 available.add(rev)
479 else:
482 else:
480 if revdifffn:
483 if revdifffn:
481 delta = revdifffn(baserev, rev)
484 delta = revdifffn(baserev, rev)
482 else:
485 else:
483 delta = mdiff.textdiff(
486 delta = mdiff.textdiff(
484 store.rawdata(baserev), store.rawdata(rev)
487 store.rawdata(baserev), store.rawdata(rev)
485 )
488 )
486
489
487 available.add(rev)
490 available.add(rev)
488
491
489 serialized_sidedata = None
492 serialized_sidedata = None
490 sidedata_flags = (0, 0)
493 sidedata_flags = (0, 0)
491 if sidedata_helpers:
494 if sidedata_helpers:
492 try:
495 try:
493 old_sidedata = store.sidedata(rev)
496 old_sidedata = store.sidedata(rev)
494 except error.CensoredNodeError:
497 except error.CensoredNodeError:
495 # skip any potential sidedata of the censored revision
498 # skip any potential sidedata of the censored revision
496 sidedata = {}
499 sidedata = {}
497 else:
500 else:
498 sidedata, sidedata_flags = sidedatamod.run_sidedata_helpers(
501 sidedata, sidedata_flags = sidedatamod.run_sidedata_helpers(
499 store=store,
502 store=store,
500 sidedata_helpers=sidedata_helpers,
503 sidedata_helpers=sidedata_helpers,
501 sidedata=old_sidedata,
504 sidedata=old_sidedata,
502 rev=rev,
505 rev=rev,
503 )
506 )
504 if sidedata:
507 if sidedata:
505 serialized_sidedata = sidedatamod.serialize_sidedata(sidedata)
508 serialized_sidedata = sidedatamod.serialize_sidedata(sidedata)
506
509
507 flags = flagsfn(rev) if flagsfn else 0
510 flags = flagsfn(rev) if flagsfn else 0
508 protocol_flags = 0
511 protocol_flags = 0
509 if serialized_sidedata:
512 if serialized_sidedata:
510 # Advertise that sidedata exists to the other side
513 # Advertise that sidedata exists to the other side
511 protocol_flags |= CG_FLAG_SIDEDATA
514 protocol_flags |= CG_FLAG_SIDEDATA
512 # Computers and removers can return flags to add and/or remove
515 # Computers and removers can return flags to add and/or remove
513 flags = flags | sidedata_flags[0] & ~sidedata_flags[1]
516 flags = flags | sidedata_flags[0] & ~sidedata_flags[1]
514
517
515 yield resultcls(
518 yield resultcls(
516 node=node,
519 node=node,
517 p1node=fnode(p1rev),
520 p1node=fnode(p1rev),
518 p2node=fnode(p2rev),
521 p2node=fnode(p2rev),
519 basenode=fnode(baserev),
522 basenode=fnode(baserev),
520 flags=flags,
523 flags=flags,
521 baserevisionsize=baserevisionsize,
524 baserevisionsize=baserevisionsize,
522 revision=revision,
525 revision=revision,
523 delta=delta,
526 delta=delta,
524 sidedata=serialized_sidedata,
527 sidedata=serialized_sidedata,
525 protocol_flags=protocol_flags,
528 protocol_flags=protocol_flags,
526 )
529 )
527
530
528 prevrev = rev
531 prevrev = rev
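
The delta-base cascade above can be summarised by the following standalone sketch; toy_choose_baserev, the string delta modes, and NULLREV are simplified stand-ins for the repository.CG_DELTAMODE_* constants and nullrev, and the nested branches are flattened for readability:

NULLREV = -1  # stand-in for nullrev

def toy_choose_baserev(
    deltamode,        # 'std', 'prev', 'full', or 'p1' (simplified constants)
    deltaparentrev,   # revision the storage delta is against, or NULLREV
    p1rev,
    prevrev,          # previously emitted revision, or None
    is_usable_base,   # the closure sketched earlier
    candelta=lambda base, rev: True,
    rev=None,
):
    # Mirrors the cascade in emitrevisions(): forced modes first, then the
    # stored delta if the receiver can use its base, then the previous
    # revision, and finally a full snapshot.
    if deltamode == 'prev':
        baserev = prevrev
    elif deltamode == 'full':
        baserev = NULLREV
    elif deltamode == 'p1':
        baserev = p1rev
    elif deltaparentrev != NULLREV and is_usable_base(deltaparentrev):
        baserev = deltaparentrev
    elif prevrev is not None:
        baserev = prevrev
    else:
        baserev = NULLREV
    # The chosen base can still be vetoed (e.g. censored data), in which
    # case we fall back to a full snapshot.
    if baserev != NULLREV and not candelta(baserev, rev):
        baserev = NULLREV
    return baserev

usable = {2, 3}.__contains__
assert toy_choose_baserev('std', 3, 1, 4, usable) == 3        # reuse the stored delta
assert toy_choose_baserev('std', 7, 1, 4, usable) == 4        # fall back to prev
assert toy_choose_baserev('std', 7, 1, None, usable) == NULLREV
assert toy_choose_baserev('full', 3, 1, 4, usable) == NULLREV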
529
532
530
533
531 def deltaiscensored(delta, baserev, baselenfn):
534 def deltaiscensored(delta, baserev, baselenfn):
532 """Determine if a delta represents censored revision data.
535 """Determine if a delta represents censored revision data.
533
536
534 ``baserev`` is the base revision this delta is encoded against.
537 ``baserev`` is the base revision this delta is encoded against.
535 ``baselenfn`` is a callable receiving a revision number that resolves the
538 ``baselenfn`` is a callable receiving a revision number that resolves the
536 length of the revision fulltext.
539 length of the revision fulltext.
537
540
538 Returns a bool indicating if the result of the delta represents a censored
541 Returns a bool indicating if the result of the delta represents a censored
539 revision.
542 revision.
540 """
543 """
541 # Fragile heuristic: unless new file meta keys are added alphabetically
544 # Fragile heuristic: unless new file meta keys are added alphabetically
542 # preceding "censored", all censored revisions are prefixed by
545 # preceding "censored", all censored revisions are prefixed by
543 # "\1\ncensored:". A delta producing such a censored revision must be a
546 # "\1\ncensored:". A delta producing such a censored revision must be a
544 # full-replacement delta, so we inspect the first and only patch in the
547 # full-replacement delta, so we inspect the first and only patch in the
545 # delta for this prefix.
548 # delta for this prefix.
546 hlen = struct.calcsize(b">lll")
549 hlen = struct.calcsize(b">lll")
547 if len(delta) <= hlen:
550 if len(delta) <= hlen:
548 return False
551 return False
549
552
550 oldlen = baselenfn(baserev)
553 oldlen = baselenfn(baserev)
551 newlen = len(delta) - hlen
554 newlen = len(delta) - hlen
552 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
555 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
553 return False
556 return False
554
557
555 add = b"\1\ncensored:"
558 add = b"\1\ncensored:"
556 addlen = len(add)
559 addlen = len(add)
557 return newlen >= addlen and delta[hlen : hlen + addlen] == add
560 return newlen >= addlen and delta[hlen : hlen + addlen] == add
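
A standalone sketch of this heuristic, assuming the standard bdiff patch header layout of three big-endian 32-bit integers (start, end, length of new data), which is what mdiff.replacediffheader() produces; toy_deltaiscensored is an illustrative name:

import struct

def toy_deltaiscensored(delta, oldlen):
    # A censored revision is stored as a single full-replacement patch
    # whose new text starts with "\1\ncensored:".
    hlen = struct.calcsize(b">lll")
    if len(delta) <= hlen:
        return False
    newlen = len(delta) - hlen
    # Equivalent of mdiff.replacediffheader(oldlen, newlen): replace bytes
    # [0, oldlen) of the base text with `newlen` bytes of new data.
    if delta[:hlen] != struct.pack(b">lll", 0, oldlen, newlen):
        return False
    add = b"\1\ncensored:"
    return newlen >= len(add) and delta[hlen:hlen + len(add)] == add

tombstone = b"\1\ncensored: gone\n\1\n"
oldtext = b"previous revision contents\n"
delta = struct.pack(b">lll", 0, len(oldtext), len(tombstone)) + tombstone
assert toy_deltaiscensored(delta, len(oldtext))
assert not toy_deltaiscensored(
    struct.pack(b">lll", 0, len(oldtext), 4) + b"data", len(oldtext)
)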