flagprocessors: directly duplicate the deprecated layer back into revlog...

Author: marmoute
Changeset: r43263:01304095 (branch: default)
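The change below adds a deprecated _processflags compatibility shim to remotefilelog (and, per the commit message, duplicates the same layer back into revlog) so existing callers keep working while core code moves to the specialized flagutil entry points. As a hedged illustration of that migration, the sketch below only mirrors calls that appear in the diff itself; the store parameter and the helper names are illustrative, standing for any object that mixes in flagutil.flagprocessorsmixin:

    from mercurial.revlogutils import flagutil

    # Sketch only: 'store' stands for any object mixing in
    # flagutil.flagprocessorsmixin (a revlog or remotefilelog instance).

    def read_fulltext(store, rawtext, flags):
        # replaces the deprecated store._processflags(rawtext, flags, 'read');
        # the first element of the result is the processed text, as used in
        # remotefilelog.revision() below
        return flagutil.processflagsread(store, rawtext, flags)[0]

    def write_rawtext(store, text, flags, sidedata=None):
        # replaces the deprecated store._processflags(text, flags, 'write');
        # returns (rawtext, validatehash), as unpacked in addrevision() below
        return flagutil.processflagswrite(store, text, flags,
                                          sidedata=sidedata or {})

    def check_rawtext(store, rawtext, flags):
        # replaces the deprecated store._processflags(rawtext, flags, 'read',
        # raw=True); returns the validate-hash result for the raw text
        return flagutil.processflagsraw(store, rawtext, flags)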
hgext/remotefilelog/remotefilelog.py
@@ -1,446 +1,458 @@
1 # remotefilelog.py - filelog implementation where filelog history is stored
1 # remotefilelog.py - filelog implementation where filelog history is stored
2 # remotely
2 # remotely
3 #
3 #
4 # Copyright 2013 Facebook, Inc.
4 # Copyright 2013 Facebook, Inc.
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import os
11 import os
12
12
13 from mercurial.node import (
13 from mercurial.node import (
14 bin,
14 bin,
15 nullid,
15 nullid,
16 wdirfilenodeids,
16 wdirfilenodeids,
17 wdirid,
17 wdirid,
18 )
18 )
19 from mercurial.i18n import _
19 from mercurial.i18n import _
20 from mercurial import (
20 from mercurial import (
21 ancestor,
21 ancestor,
22 error,
22 error,
23 mdiff,
23 mdiff,
24 revlog,
24 revlog,
25 util,
25 )
26 )
26 from mercurial.utils import storageutil
27 from mercurial.utils import storageutil
27 from mercurial.revlogutils import flagutil
28 from mercurial.revlogutils import flagutil
28
29
29 from . import (
30 from . import (
30 constants,
31 constants,
31 fileserverclient,
32 fileserverclient,
32 shallowutil,
33 shallowutil,
33 )
34 )
34
35
35 class remotefilelognodemap(object):
36 class remotefilelognodemap(object):
36 def __init__(self, filename, store):
37 def __init__(self, filename, store):
37 self._filename = filename
38 self._filename = filename
38 self._store = store
39 self._store = store
39
40
40 def __contains__(self, node):
41 def __contains__(self, node):
41 missing = self._store.getmissing([(self._filename, node)])
42 missing = self._store.getmissing([(self._filename, node)])
42 return not bool(missing)
43 return not bool(missing)
43
44
44 def __get__(self, node):
45 def __get__(self, node):
45 if node not in self:
46 if node not in self:
46 raise KeyError(node)
47 raise KeyError(node)
47 return node
48 return node
48
49
49 class remotefilelog(flagutil.flagprocessorsmixin):
50 class remotefilelog(flagutil.flagprocessorsmixin):
50
51
51 _generaldelta = True
52 _generaldelta = True
52
53
53 def __init__(self, opener, path, repo):
54 def __init__(self, opener, path, repo):
54 self.opener = opener
55 self.opener = opener
55 self.filename = path
56 self.filename = path
56 self.repo = repo
57 self.repo = repo
57 self.nodemap = remotefilelognodemap(self.filename, repo.contentstore)
58 self.nodemap = remotefilelognodemap(self.filename, repo.contentstore)
58
59
59 self.version = 1
60 self.version = 1
60
61
61 self._flagprocessors = dict(flagutil.flagprocessors)
62 self._flagprocessors = dict(flagutil.flagprocessors)
62
63
63 def read(self, node):
64 def read(self, node):
64 """returns the file contents at this node"""
65 """returns the file contents at this node"""
65 t = self.revision(node)
66 t = self.revision(node)
66 if not t.startswith('\1\n'):
67 if not t.startswith('\1\n'):
67 return t
68 return t
68 s = t.index('\1\n', 2)
69 s = t.index('\1\n', 2)
69 return t[s + 2:]
70 return t[s + 2:]
70
71
71 def add(self, text, meta, transaction, linknode, p1=None, p2=None):
72 def add(self, text, meta, transaction, linknode, p1=None, p2=None):
72 # hash with the metadata, like in vanilla filelogs
73 # hash with the metadata, like in vanilla filelogs
73 hashtext = shallowutil.createrevlogtext(text, meta.get('copy'),
74 hashtext = shallowutil.createrevlogtext(text, meta.get('copy'),
74 meta.get('copyrev'))
75 meta.get('copyrev'))
75 node = storageutil.hashrevisionsha1(hashtext, p1, p2)
76 node = storageutil.hashrevisionsha1(hashtext, p1, p2)
76 return self.addrevision(hashtext, transaction, linknode, p1, p2,
77 return self.addrevision(hashtext, transaction, linknode, p1, p2,
77 node=node)
78 node=node)
78
79
79 def _createfileblob(self, text, meta, flags, p1, p2, node, linknode):
80 def _createfileblob(self, text, meta, flags, p1, p2, node, linknode):
80 # text passed to "_createfileblob" does not include filelog metadata
81 # text passed to "_createfileblob" does not include filelog metadata
81 header = shallowutil.buildfileblobheader(len(text), flags)
82 header = shallowutil.buildfileblobheader(len(text), flags)
82 data = "%s\0%s" % (header, text)
83 data = "%s\0%s" % (header, text)
83
84
84 realp1 = p1
85 realp1 = p1
85 copyfrom = ""
86 copyfrom = ""
86 if meta and 'copy' in meta:
87 if meta and 'copy' in meta:
87 copyfrom = meta['copy']
88 copyfrom = meta['copy']
88 realp1 = bin(meta['copyrev'])
89 realp1 = bin(meta['copyrev'])
89
90
90 data += "%s%s%s%s%s\0" % (node, realp1, p2, linknode, copyfrom)
91 data += "%s%s%s%s%s\0" % (node, realp1, p2, linknode, copyfrom)
91
92
92 visited = set()
93 visited = set()
93
94
94 pancestors = {}
95 pancestors = {}
95 queue = []
96 queue = []
96 if realp1 != nullid:
97 if realp1 != nullid:
97 p1flog = self
98 p1flog = self
98 if copyfrom:
99 if copyfrom:
99 p1flog = remotefilelog(self.opener, copyfrom, self.repo)
100 p1flog = remotefilelog(self.opener, copyfrom, self.repo)
100
101
101 pancestors.update(p1flog.ancestormap(realp1))
102 pancestors.update(p1flog.ancestormap(realp1))
102 queue.append(realp1)
103 queue.append(realp1)
103 visited.add(realp1)
104 visited.add(realp1)
104 if p2 != nullid:
105 if p2 != nullid:
105 pancestors.update(self.ancestormap(p2))
106 pancestors.update(self.ancestormap(p2))
106 queue.append(p2)
107 queue.append(p2)
107 visited.add(p2)
108 visited.add(p2)
108
109
109 ancestortext = ""
110 ancestortext = ""
110
111
111 # add the ancestors in topological order
112 # add the ancestors in topological order
112 while queue:
113 while queue:
113 c = queue.pop(0)
114 c = queue.pop(0)
114 pa1, pa2, ancestorlinknode, pacopyfrom = pancestors[c]
115 pa1, pa2, ancestorlinknode, pacopyfrom = pancestors[c]
115
116
116 pacopyfrom = pacopyfrom or ''
117 pacopyfrom = pacopyfrom or ''
117 ancestortext += "%s%s%s%s%s\0" % (
118 ancestortext += "%s%s%s%s%s\0" % (
118 c, pa1, pa2, ancestorlinknode, pacopyfrom)
119 c, pa1, pa2, ancestorlinknode, pacopyfrom)
119
120
120 if pa1 != nullid and pa1 not in visited:
121 if pa1 != nullid and pa1 not in visited:
121 queue.append(pa1)
122 queue.append(pa1)
122 visited.add(pa1)
123 visited.add(pa1)
123 if pa2 != nullid and pa2 not in visited:
124 if pa2 != nullid and pa2 not in visited:
124 queue.append(pa2)
125 queue.append(pa2)
125 visited.add(pa2)
126 visited.add(pa2)
126
127
127 data += ancestortext
128 data += ancestortext
128
129
129 return data
130 return data
130
131
131 def addrevision(self, text, transaction, linknode, p1, p2, cachedelta=None,
132 def addrevision(self, text, transaction, linknode, p1, p2, cachedelta=None,
132 node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
133 node=None, flags=revlog.REVIDX_DEFAULT_FLAGS,
133 sidedata=None):
134 sidedata=None):
134 # text passed to "addrevision" includes hg filelog metadata header
135 # text passed to "addrevision" includes hg filelog metadata header
135 if node is None:
136 if node is None:
136 node = storageutil.hashrevisionsha1(text, p1, p2)
137 node = storageutil.hashrevisionsha1(text, p1, p2)
137 if sidedata is None:
138 if sidedata is None:
138 sidedata = {}
139 sidedata = {}
139
140
140 meta, metaoffset = storageutil.parsemeta(text)
141 meta, metaoffset = storageutil.parsemeta(text)
141 rawtext, validatehash = flagutil.processflagswrite(self, text, flags,
142 rawtext, validatehash = flagutil.processflagswrite(self, text, flags,
142 sidedata=sidedata)
143 sidedata=sidedata)
143 return self.addrawrevision(rawtext, transaction, linknode, p1, p2,
144 return self.addrawrevision(rawtext, transaction, linknode, p1, p2,
144 node, flags, cachedelta,
145 node, flags, cachedelta,
145 _metatuple=(meta, metaoffset))
146 _metatuple=(meta, metaoffset))
146
147
147 def addrawrevision(self, rawtext, transaction, linknode, p1, p2, node,
148 def addrawrevision(self, rawtext, transaction, linknode, p1, p2, node,
148 flags, cachedelta=None, _metatuple=None):
149 flags, cachedelta=None, _metatuple=None):
149 if _metatuple:
150 if _metatuple:
150 # _metatuple: used by "addrevision" internally by remotefilelog
151 # _metatuple: used by "addrevision" internally by remotefilelog
151 # meta was parsed confidently
152 # meta was parsed confidently
152 meta, metaoffset = _metatuple
153 meta, metaoffset = _metatuple
153 else:
154 else:
154 # not from self.addrevision, but something else (repo._filecommit)
155 # not from self.addrevision, but something else (repo._filecommit)
155 # calls addrawrevision directly. remotefilelog needs to get and
156 # calls addrawrevision directly. remotefilelog needs to get and
156 # strip filelog metadata.
157 # strip filelog metadata.
157 # we don't have confidence about whether rawtext contains filelog
158 # we don't have confidence about whether rawtext contains filelog
158 # metadata or not (flag processor could replace it), so we just
159 # metadata or not (flag processor could replace it), so we just
159 # parse it as best-effort.
160 # parse it as best-effort.
160 # in LFS (flags != 0)'s case, the best way is to call LFS code to
161 # in LFS (flags != 0)'s case, the best way is to call LFS code to
161 # get the meta information, instead of storageutil.parsemeta.
162 # get the meta information, instead of storageutil.parsemeta.
162 meta, metaoffset = storageutil.parsemeta(rawtext)
163 meta, metaoffset = storageutil.parsemeta(rawtext)
163 if flags != 0:
164 if flags != 0:
164 # when flags != 0, be conservative and do not mangle rawtext, since
165 # when flags != 0, be conservative and do not mangle rawtext, since
165 # a read flag processor expects the text not being mangled at all.
166 # a read flag processor expects the text not being mangled at all.
166 metaoffset = 0
167 metaoffset = 0
167 if metaoffset:
168 if metaoffset:
168 # remotefilelog fileblob stores copy metadata in its ancestortext,
169 # remotefilelog fileblob stores copy metadata in its ancestortext,
169 # not its main blob. so we need to remove filelog metadata
170 # not its main blob. so we need to remove filelog metadata
170 # (containing copy information) from text.
171 # (containing copy information) from text.
171 blobtext = rawtext[metaoffset:]
172 blobtext = rawtext[metaoffset:]
172 else:
173 else:
173 blobtext = rawtext
174 blobtext = rawtext
174 data = self._createfileblob(blobtext, meta, flags, p1, p2, node,
175 data = self._createfileblob(blobtext, meta, flags, p1, p2, node,
175 linknode)
176 linknode)
176 self.repo.contentstore.addremotefilelognode(self.filename, node, data)
177 self.repo.contentstore.addremotefilelognode(self.filename, node, data)
177
178
178 return node
179 return node
179
180
180 def renamed(self, node):
181 def renamed(self, node):
181 ancestors = self.repo.metadatastore.getancestors(self.filename, node)
182 ancestors = self.repo.metadatastore.getancestors(self.filename, node)
182 p1, p2, linknode, copyfrom = ancestors[node]
183 p1, p2, linknode, copyfrom = ancestors[node]
183 if copyfrom:
184 if copyfrom:
184 return (copyfrom, p1)
185 return (copyfrom, p1)
185
186
186 return False
187 return False
187
188
188 def size(self, node):
189 def size(self, node):
189 """return the size of a given revision"""
190 """return the size of a given revision"""
190 return len(self.read(node))
191 return len(self.read(node))
191
192
192 rawsize = size
193 rawsize = size
193
194
194 def cmp(self, node, text):
195 def cmp(self, node, text):
195 """compare text with a given file revision
196 """compare text with a given file revision
196
197
197 returns True if text is different than what is stored.
198 returns True if text is different than what is stored.
198 """
199 """
199
200
200 if node == nullid:
201 if node == nullid:
201 return True
202 return True
202
203
203 nodetext = self.read(node)
204 nodetext = self.read(node)
204 return nodetext != text
205 return nodetext != text
205
206
206 def __nonzero__(self):
207 def __nonzero__(self):
207 return True
208 return True
208
209
209 __bool__ = __nonzero__
210 __bool__ = __nonzero__
210
211
211 def __len__(self):
212 def __len__(self):
212 if self.filename == '.hgtags':
213 if self.filename == '.hgtags':
213 # The length of .hgtags is used to fast path tag checking.
214 # The length of .hgtags is used to fast path tag checking.
214 # remotefilelog doesn't support .hgtags since the entire .hgtags
215 # remotefilelog doesn't support .hgtags since the entire .hgtags
215 # history is needed. Use the excludepattern setting to make
216 # history is needed. Use the excludepattern setting to make
216 # .hgtags a normal filelog.
217 # .hgtags a normal filelog.
217 return 0
218 return 0
218
219
219 raise RuntimeError("len not supported")
220 raise RuntimeError("len not supported")
220
221
221 def empty(self):
222 def empty(self):
222 return False
223 return False
223
224
224 def flags(self, node):
225 def flags(self, node):
225 if isinstance(node, int):
226 if isinstance(node, int):
226 raise error.ProgrammingError(
227 raise error.ProgrammingError(
227 'remotefilelog does not accept integer rev for flags')
228 'remotefilelog does not accept integer rev for flags')
228 store = self.repo.contentstore
229 store = self.repo.contentstore
229 return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
230 return store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
230
231
231 def parents(self, node):
232 def parents(self, node):
232 if node == nullid:
233 if node == nullid:
233 return nullid, nullid
234 return nullid, nullid
234
235
235 ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
236 ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
236 p1, p2, linknode, copyfrom = ancestormap[node]
237 p1, p2, linknode, copyfrom = ancestormap[node]
237 if copyfrom:
238 if copyfrom:
238 p1 = nullid
239 p1 = nullid
239
240
240 return p1, p2
241 return p1, p2
241
242
242 def parentrevs(self, rev):
243 def parentrevs(self, rev):
243 # TODO(augie): this is a node and should be a rev, but for now
244 # TODO(augie): this is a node and should be a rev, but for now
244 # nothing in core seems to actually break.
245 # nothing in core seems to actually break.
245 return self.parents(rev)
246 return self.parents(rev)
246
247
247 def linknode(self, node):
248 def linknode(self, node):
248 ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
249 ancestormap = self.repo.metadatastore.getancestors(self.filename, node)
249 p1, p2, linknode, copyfrom = ancestormap[node]
250 p1, p2, linknode, copyfrom = ancestormap[node]
250 return linknode
251 return linknode
251
252
252 def linkrev(self, node):
253 def linkrev(self, node):
253 return self.repo.unfiltered().changelog.rev(self.linknode(node))
254 return self.repo.unfiltered().changelog.rev(self.linknode(node))
254
255
255 def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
256 def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
256 assumehaveparentrevisions=False, deltaprevious=False,
257 assumehaveparentrevisions=False, deltaprevious=False,
257 deltamode=None):
258 deltamode=None):
258 # we don't use any of these parameters here
259 # we don't use any of these parameters here
259 del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
260 del nodesorder, revisiondata, assumehaveparentrevisions, deltaprevious
260 del deltamode
261 del deltamode
261 prevnode = None
262 prevnode = None
262 for node in nodes:
263 for node in nodes:
263 p1, p2 = self.parents(node)
264 p1, p2 = self.parents(node)
264 if prevnode is None:
265 if prevnode is None:
265 basenode = prevnode = p1
266 basenode = prevnode = p1
266 if basenode == node:
267 if basenode == node:
267 basenode = nullid
268 basenode = nullid
268 if basenode != nullid:
269 if basenode != nullid:
269 revision = None
270 revision = None
270 delta = self.revdiff(basenode, node)
271 delta = self.revdiff(basenode, node)
271 else:
272 else:
272 revision = self.rawdata(node)
273 revision = self.rawdata(node)
273 delta = None
274 delta = None
274 yield revlog.revlogrevisiondelta(
275 yield revlog.revlogrevisiondelta(
275 node=node,
276 node=node,
276 p1node=p1,
277 p1node=p1,
277 p2node=p2,
278 p2node=p2,
278 linknode=self.linknode(node),
279 linknode=self.linknode(node),
279 basenode=basenode,
280 basenode=basenode,
280 flags=self.flags(node),
281 flags=self.flags(node),
281 baserevisionsize=None,
282 baserevisionsize=None,
282 revision=revision,
283 revision=revision,
283 delta=delta,
284 delta=delta,
284 )
285 )
285
286
286 def revdiff(self, node1, node2):
287 def revdiff(self, node1, node2):
287 return mdiff.textdiff(self.rawdata(node1),
288 return mdiff.textdiff(self.rawdata(node1),
288 self.rawdata(node2))
289 self.rawdata(node2))
289
290
290 def lookup(self, node):
291 def lookup(self, node):
291 if len(node) == 40:
292 if len(node) == 40:
292 node = bin(node)
293 node = bin(node)
293 if len(node) != 20:
294 if len(node) != 20:
294 raise error.LookupError(node, self.filename,
295 raise error.LookupError(node, self.filename,
295 _('invalid lookup input'))
296 _('invalid lookup input'))
296
297
297 return node
298 return node
298
299
299 def rev(self, node):
300 def rev(self, node):
300 # This is a hack to make TortoiseHG work.
301 # This is a hack to make TortoiseHG work.
301 return node
302 return node
302
303
303 def node(self, rev):
304 def node(self, rev):
304 # This is a hack.
305 # This is a hack.
305 if isinstance(rev, int):
306 if isinstance(rev, int):
306 raise error.ProgrammingError(
307 raise error.ProgrammingError(
307 'remotefilelog does not convert integer rev to node')
308 'remotefilelog does not convert integer rev to node')
308 return rev
309 return rev
309
310
311 def _processflags(self, text, flags, operation, raw=False):
312 """deprecated entry point to access flag processors"""
313 msg = ('_processflag(...) use the specialized variant')
314 util.nouideprecwarn(msg, '5.2', stacklevel=2)
315 if raw:
316 return text, flagutil.processflagsraw(self, text, flags)
317 elif operation == 'read':
318 return flagutil.processflagsread(self, text, flags)
319 else: # write operation
320 return flagutil.processflagswrite(self, text, flags)
321
310 def revision(self, node, raw=False):
322 def revision(self, node, raw=False):
311 """returns the revlog contents at this node.
323 """returns the revlog contents at this node.
312 this includes the meta data traditionally included in file revlogs.
324 this includes the meta data traditionally included in file revlogs.
313 this is generally only used for bundling and communicating with vanilla
325 this is generally only used for bundling and communicating with vanilla
314 hg clients.
326 hg clients.
315 """
327 """
316 if node == nullid:
328 if node == nullid:
317 return ""
329 return ""
318 if len(node) != 20:
330 if len(node) != 20:
319 raise error.LookupError(node, self.filename,
331 raise error.LookupError(node, self.filename,
320 _('invalid revision input'))
332 _('invalid revision input'))
321 if node == wdirid or node in wdirfilenodeids:
333 if node == wdirid or node in wdirfilenodeids:
322 raise error.WdirUnsupported
334 raise error.WdirUnsupported
323
335
324 store = self.repo.contentstore
336 store = self.repo.contentstore
325 rawtext = store.get(self.filename, node)
337 rawtext = store.get(self.filename, node)
326 if raw:
338 if raw:
327 return rawtext
339 return rawtext
328 flags = store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
340 flags = store.getmeta(self.filename, node).get(constants.METAKEYFLAG, 0)
329 if flags == 0:
341 if flags == 0:
330 return rawtext
342 return rawtext
331 return flagutil.processflagsread(self, rawtext, flags)[0]
343 return flagutil.processflagsread(self, rawtext, flags)[0]
332
344
333 def rawdata(self, node):
345 def rawdata(self, node):
334 return self.revision(node, raw=False)
346 return self.revision(node, raw=False)
335
347
336 def _read(self, id):
348 def _read(self, id):
337 """reads the raw file blob from disk, cache, or server"""
349 """reads the raw file blob from disk, cache, or server"""
338 fileservice = self.repo.fileservice
350 fileservice = self.repo.fileservice
339 localcache = fileservice.localcache
351 localcache = fileservice.localcache
340 cachekey = fileserverclient.getcachekey(self.repo.name, self.filename,
352 cachekey = fileserverclient.getcachekey(self.repo.name, self.filename,
341 id)
353 id)
342 try:
354 try:
343 return localcache.read(cachekey)
355 return localcache.read(cachekey)
344 except KeyError:
356 except KeyError:
345 pass
357 pass
346
358
347 localkey = fileserverclient.getlocalkey(self.filename, id)
359 localkey = fileserverclient.getlocalkey(self.filename, id)
348 localpath = os.path.join(self.localpath, localkey)
360 localpath = os.path.join(self.localpath, localkey)
349 try:
361 try:
350 return shallowutil.readfile(localpath)
362 return shallowutil.readfile(localpath)
351 except IOError:
363 except IOError:
352 pass
364 pass
353
365
354 fileservice.prefetch([(self.filename, id)])
366 fileservice.prefetch([(self.filename, id)])
355 try:
367 try:
356 return localcache.read(cachekey)
368 return localcache.read(cachekey)
357 except KeyError:
369 except KeyError:
358 pass
370 pass
359
371
360 raise error.LookupError(id, self.filename, _('no node'))
372 raise error.LookupError(id, self.filename, _('no node'))
361
373
362 def ancestormap(self, node):
374 def ancestormap(self, node):
363 return self.repo.metadatastore.getancestors(self.filename, node)
375 return self.repo.metadatastore.getancestors(self.filename, node)
364
376
365 def ancestor(self, a, b):
377 def ancestor(self, a, b):
366 if a == nullid or b == nullid:
378 if a == nullid or b == nullid:
367 return nullid
379 return nullid
368
380
369 revmap, parentfunc = self._buildrevgraph(a, b)
381 revmap, parentfunc = self._buildrevgraph(a, b)
370 nodemap = dict(((v, k) for (k, v) in revmap.iteritems()))
382 nodemap = dict(((v, k) for (k, v) in revmap.iteritems()))
371
383
372 ancs = ancestor.ancestors(parentfunc, revmap[a], revmap[b])
384 ancs = ancestor.ancestors(parentfunc, revmap[a], revmap[b])
373 if ancs:
385 if ancs:
374 # choose a consistent winner when there's a tie
386 # choose a consistent winner when there's a tie
375 return min(map(nodemap.__getitem__, ancs))
387 return min(map(nodemap.__getitem__, ancs))
376 return nullid
388 return nullid
377
389
378 def commonancestorsheads(self, a, b):
390 def commonancestorsheads(self, a, b):
379 """calculate all the heads of the common ancestors of nodes a and b"""
391 """calculate all the heads of the common ancestors of nodes a and b"""
380
392
381 if a == nullid or b == nullid:
393 if a == nullid or b == nullid:
382 return nullid
394 return nullid
383
395
384 revmap, parentfunc = self._buildrevgraph(a, b)
396 revmap, parentfunc = self._buildrevgraph(a, b)
385 nodemap = dict(((v, k) for (k, v) in revmap.iteritems()))
397 nodemap = dict(((v, k) for (k, v) in revmap.iteritems()))
386
398
387 ancs = ancestor.commonancestorsheads(parentfunc, revmap[a], revmap[b])
399 ancs = ancestor.commonancestorsheads(parentfunc, revmap[a], revmap[b])
388 return map(nodemap.__getitem__, ancs)
400 return map(nodemap.__getitem__, ancs)
389
401
390 def _buildrevgraph(self, a, b):
402 def _buildrevgraph(self, a, b):
391 """Builds a numeric revision graph for the given two nodes.
403 """Builds a numeric revision graph for the given two nodes.
392 Returns a node->rev map and a rev->[revs] parent function.
404 Returns a node->rev map and a rev->[revs] parent function.
393 """
405 """
394 amap = self.ancestormap(a)
406 amap = self.ancestormap(a)
395 bmap = self.ancestormap(b)
407 bmap = self.ancestormap(b)
396
408
397 # Union the two maps
409 # Union the two maps
398 parentsmap = collections.defaultdict(list)
410 parentsmap = collections.defaultdict(list)
399 allparents = set()
411 allparents = set()
400 for mapping in (amap, bmap):
412 for mapping in (amap, bmap):
401 for node, pdata in mapping.iteritems():
413 for node, pdata in mapping.iteritems():
402 parents = parentsmap[node]
414 parents = parentsmap[node]
403 p1, p2, linknode, copyfrom = pdata
415 p1, p2, linknode, copyfrom = pdata
404 # Don't follow renames (copyfrom).
416 # Don't follow renames (copyfrom).
405 # remotefilectx.ancestor does that.
417 # remotefilectx.ancestor does that.
406 if p1 != nullid and not copyfrom:
418 if p1 != nullid and not copyfrom:
407 parents.append(p1)
419 parents.append(p1)
408 allparents.add(p1)
420 allparents.add(p1)
409 if p2 != nullid:
421 if p2 != nullid:
410 parents.append(p2)
422 parents.append(p2)
411 allparents.add(p2)
423 allparents.add(p2)
412
424
413 # Breadth first traversal to build linkrev graph
425 # Breadth first traversal to build linkrev graph
414 parentrevs = collections.defaultdict(list)
426 parentrevs = collections.defaultdict(list)
415 revmap = {}
427 revmap = {}
416 queue = collections.deque(((None, n) for n in parentsmap
428 queue = collections.deque(((None, n) for n in parentsmap
417 if n not in allparents))
429 if n not in allparents))
418 while queue:
430 while queue:
419 prevrev, current = queue.pop()
431 prevrev, current = queue.pop()
420 if current in revmap:
432 if current in revmap:
421 if prevrev:
433 if prevrev:
422 parentrevs[prevrev].append(revmap[current])
434 parentrevs[prevrev].append(revmap[current])
423 continue
435 continue
424
436
425 # Assign linkrevs in reverse order, so start at
437 # Assign linkrevs in reverse order, so start at
426 # len(parentsmap) and work backwards.
438 # len(parentsmap) and work backwards.
427 currentrev = len(parentsmap) - len(revmap) - 1
439 currentrev = len(parentsmap) - len(revmap) - 1
428 revmap[current] = currentrev
440 revmap[current] = currentrev
429
441
430 if prevrev:
442 if prevrev:
431 parentrevs[prevrev].append(currentrev)
443 parentrevs[prevrev].append(currentrev)
432
444
433 for parent in parentsmap.get(current):
445 for parent in parentsmap.get(current):
434 queue.appendleft((currentrev, parent))
446 queue.appendleft((currentrev, parent))
435
447
436 return revmap, parentrevs.__getitem__
448 return revmap, parentrevs.__getitem__
437
449
438 def strip(self, minlink, transaction):
450 def strip(self, minlink, transaction):
439 pass
451 pass
440
452
441 # misc unused things
453 # misc unused things
442 def files(self):
454 def files(self):
443 return []
455 return []
444
456
445 def checksize(self):
457 def checksize(self):
446 return 0, 0
458 return 0, 0
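Before the revlog.py hunk, one note on the per-instance flag-processor table that both classes now build with self._flagprocessors = dict(flagutil.flagprocessors): extensions hook extra processors into it through flagutil.insertflagprocessor, as _loadindex() does below for the 'flagprocessors' opener option. The following is a minimal sketch of such a processor triple, modeled on the ellipsis* processors defined in the hunk that follows; SOMEFLAG and the noop* names are hypothetical placeholders, not part of this changeset:

    def noopreadprocessor(rl, text):
        # read direction: returns (text, validatehash, sidedata)
        return text, True, {}

    def noopwriteprocessor(rl, text, sidedata):
        # write direction: returns (rawtext, validatehash)
        return text, True

    def nooprawprocessor(rl, text):
        # raw direction: is the stored rawtext valid against its hash?
        return True

    noopprocessor = (noopreadprocessor, noopwriteprocessor, nooprawprocessor)

    # Registration against a revlog instance rl, mirroring _loadindex() below;
    # SOMEFLAG would be a REVIDX_* bit known to flagutil:
    # flagutil.insertflagprocessor(SOMEFLAG, noopprocessor, rl._flagprocessors)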
mercurial/revlog.py
@@ -1,2639 +1,2650 @@
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import collections
16 import collections
17 import contextlib
17 import contextlib
18 import errno
18 import errno
19 import io
19 import io
20 import os
20 import os
21 import struct
21 import struct
22 import zlib
22 import zlib
23
23
24 # import stuff from node for others to import from revlog
24 # import stuff from node for others to import from revlog
25 from .node import (
25 from .node import (
26 bin,
26 bin,
27 hex,
27 hex,
28 nullhex,
28 nullhex,
29 nullid,
29 nullid,
30 nullrev,
30 nullrev,
31 short,
31 short,
32 wdirfilenodeids,
32 wdirfilenodeids,
33 wdirhex,
33 wdirhex,
34 wdirid,
34 wdirid,
35 wdirrev,
35 wdirrev,
36 )
36 )
37 from .i18n import _
37 from .i18n import _
38 from .revlogutils.constants import (
38 from .revlogutils.constants import (
39 FLAG_GENERALDELTA,
39 FLAG_GENERALDELTA,
40 FLAG_INLINE_DATA,
40 FLAG_INLINE_DATA,
41 REVLOGV0,
41 REVLOGV0,
42 REVLOGV1,
42 REVLOGV1,
43 REVLOGV1_FLAGS,
43 REVLOGV1_FLAGS,
44 REVLOGV2,
44 REVLOGV2,
45 REVLOGV2_FLAGS,
45 REVLOGV2_FLAGS,
46 REVLOG_DEFAULT_FLAGS,
46 REVLOG_DEFAULT_FLAGS,
47 REVLOG_DEFAULT_FORMAT,
47 REVLOG_DEFAULT_FORMAT,
48 REVLOG_DEFAULT_VERSION,
48 REVLOG_DEFAULT_VERSION,
49 )
49 )
50 from .revlogutils.flagutil import (
50 from .revlogutils.flagutil import (
51 REVIDX_DEFAULT_FLAGS,
51 REVIDX_DEFAULT_FLAGS,
52 REVIDX_ELLIPSIS,
52 REVIDX_ELLIPSIS,
53 REVIDX_EXTSTORED,
53 REVIDX_EXTSTORED,
54 REVIDX_FLAGS_ORDER,
54 REVIDX_FLAGS_ORDER,
55 REVIDX_ISCENSORED,
55 REVIDX_ISCENSORED,
56 REVIDX_RAWTEXT_CHANGING_FLAGS,
56 REVIDX_RAWTEXT_CHANGING_FLAGS,
57 )
57 )
58 from .thirdparty import (
58 from .thirdparty import (
59 attr,
59 attr,
60 )
60 )
61 from . import (
61 from . import (
62 ancestor,
62 ancestor,
63 dagop,
63 dagop,
64 error,
64 error,
65 mdiff,
65 mdiff,
66 policy,
66 policy,
67 pycompat,
67 pycompat,
68 templatefilters,
68 templatefilters,
69 util,
69 util,
70 )
70 )
71 from .interfaces import (
71 from .interfaces import (
72 repository,
72 repository,
73 util as interfaceutil,
73 util as interfaceutil,
74 )
74 )
75 from .revlogutils import (
75 from .revlogutils import (
76 deltas as deltautil,
76 deltas as deltautil,
77 flagutil,
77 flagutil,
78 )
78 )
79 from .utils import (
79 from .utils import (
80 storageutil,
80 storageutil,
81 stringutil,
81 stringutil,
82 )
82 )
83
83
84 # blanked usage of all the name to prevent pyflakes constraints
84 # blanked usage of all the name to prevent pyflakes constraints
85 # We need these name available in the module for extensions.
85 # We need these name available in the module for extensions.
86 REVLOGV0
86 REVLOGV0
87 REVLOGV1
87 REVLOGV1
88 REVLOGV2
88 REVLOGV2
89 FLAG_INLINE_DATA
89 FLAG_INLINE_DATA
90 FLAG_GENERALDELTA
90 FLAG_GENERALDELTA
91 REVLOG_DEFAULT_FLAGS
91 REVLOG_DEFAULT_FLAGS
92 REVLOG_DEFAULT_FORMAT
92 REVLOG_DEFAULT_FORMAT
93 REVLOG_DEFAULT_VERSION
93 REVLOG_DEFAULT_VERSION
94 REVLOGV1_FLAGS
94 REVLOGV1_FLAGS
95 REVLOGV2_FLAGS
95 REVLOGV2_FLAGS
96 REVIDX_ISCENSORED
96 REVIDX_ISCENSORED
97 REVIDX_ELLIPSIS
97 REVIDX_ELLIPSIS
98 REVIDX_EXTSTORED
98 REVIDX_EXTSTORED
99 REVIDX_DEFAULT_FLAGS
99 REVIDX_DEFAULT_FLAGS
100 REVIDX_FLAGS_ORDER
100 REVIDX_FLAGS_ORDER
101 REVIDX_RAWTEXT_CHANGING_FLAGS
101 REVIDX_RAWTEXT_CHANGING_FLAGS
102
102
103 parsers = policy.importmod(r'parsers')
103 parsers = policy.importmod(r'parsers')
104 rustancestor = policy.importrust(r'ancestor')
104 rustancestor = policy.importrust(r'ancestor')
105 rustdagop = policy.importrust(r'dagop')
105 rustdagop = policy.importrust(r'dagop')
106
106
107 # Aliased for performance.
107 # Aliased for performance.
108 _zlibdecompress = zlib.decompress
108 _zlibdecompress = zlib.decompress
109
109
110 # max size of revlog with inline data
110 # max size of revlog with inline data
111 _maxinline = 131072
111 _maxinline = 131072
112 _chunksize = 1048576
112 _chunksize = 1048576
113
113
114 # Flag processors for REVIDX_ELLIPSIS.
114 # Flag processors for REVIDX_ELLIPSIS.
115 def ellipsisreadprocessor(rl, text):
115 def ellipsisreadprocessor(rl, text):
116 return text, False, {}
116 return text, False, {}
117
117
118 def ellipsiswriteprocessor(rl, text, sidedata):
118 def ellipsiswriteprocessor(rl, text, sidedata):
119 return text, False
119 return text, False
120
120
121 def ellipsisrawprocessor(rl, text):
121 def ellipsisrawprocessor(rl, text):
122 return False
122 return False
123
123
124 ellipsisprocessor = (
124 ellipsisprocessor = (
125 ellipsisreadprocessor,
125 ellipsisreadprocessor,
126 ellipsiswriteprocessor,
126 ellipsiswriteprocessor,
127 ellipsisrawprocessor,
127 ellipsisrawprocessor,
128 )
128 )
129
129
130 def getoffset(q):
130 def getoffset(q):
131 return int(q >> 16)
131 return int(q >> 16)
132
132
133 def gettype(q):
133 def gettype(q):
134 return int(q & 0xFFFF)
134 return int(q & 0xFFFF)
135
135
136 def offset_type(offset, type):
136 def offset_type(offset, type):
137 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
137 if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
138 raise ValueError('unknown revlog index flags')
138 raise ValueError('unknown revlog index flags')
139 return int(int(offset) << 16 | type)
139 return int(int(offset) << 16 | type)
140
140
141 @attr.s(slots=True, frozen=True)
141 @attr.s(slots=True, frozen=True)
142 class _revisioninfo(object):
142 class _revisioninfo(object):
143 """Information about a revision that allows building its fulltext
143 """Information about a revision that allows building its fulltext
144 node: expected hash of the revision
144 node: expected hash of the revision
145 p1, p2: parent revs of the revision
145 p1, p2: parent revs of the revision
146 btext: built text cache consisting of a one-element list
146 btext: built text cache consisting of a one-element list
147 cachedelta: (baserev, uncompressed_delta) or None
147 cachedelta: (baserev, uncompressed_delta) or None
148 flags: flags associated to the revision storage
148 flags: flags associated to the revision storage
149
149
150 One of btext[0] or cachedelta must be set.
150 One of btext[0] or cachedelta must be set.
151 """
151 """
152 node = attr.ib()
152 node = attr.ib()
153 p1 = attr.ib()
153 p1 = attr.ib()
154 p2 = attr.ib()
154 p2 = attr.ib()
155 btext = attr.ib()
155 btext = attr.ib()
156 textlen = attr.ib()
156 textlen = attr.ib()
157 cachedelta = attr.ib()
157 cachedelta = attr.ib()
158 flags = attr.ib()
158 flags = attr.ib()
159
159
160 @interfaceutil.implementer(repository.irevisiondelta)
160 @interfaceutil.implementer(repository.irevisiondelta)
161 @attr.s(slots=True)
161 @attr.s(slots=True)
162 class revlogrevisiondelta(object):
162 class revlogrevisiondelta(object):
163 node = attr.ib()
163 node = attr.ib()
164 p1node = attr.ib()
164 p1node = attr.ib()
165 p2node = attr.ib()
165 p2node = attr.ib()
166 basenode = attr.ib()
166 basenode = attr.ib()
167 flags = attr.ib()
167 flags = attr.ib()
168 baserevisionsize = attr.ib()
168 baserevisionsize = attr.ib()
169 revision = attr.ib()
169 revision = attr.ib()
170 delta = attr.ib()
170 delta = attr.ib()
171 linknode = attr.ib(default=None)
171 linknode = attr.ib(default=None)
172
172
173 @interfaceutil.implementer(repository.iverifyproblem)
173 @interfaceutil.implementer(repository.iverifyproblem)
174 @attr.s(frozen=True)
174 @attr.s(frozen=True)
175 class revlogproblem(object):
175 class revlogproblem(object):
176 warning = attr.ib(default=None)
176 warning = attr.ib(default=None)
177 error = attr.ib(default=None)
177 error = attr.ib(default=None)
178 node = attr.ib(default=None)
178 node = attr.ib(default=None)
179
179
180 # index v0:
180 # index v0:
181 # 4 bytes: offset
181 # 4 bytes: offset
182 # 4 bytes: compressed length
182 # 4 bytes: compressed length
183 # 4 bytes: base rev
183 # 4 bytes: base rev
184 # 4 bytes: link rev
184 # 4 bytes: link rev
185 # 20 bytes: parent 1 nodeid
185 # 20 bytes: parent 1 nodeid
186 # 20 bytes: parent 2 nodeid
186 # 20 bytes: parent 2 nodeid
187 # 20 bytes: nodeid
187 # 20 bytes: nodeid
188 indexformatv0 = struct.Struct(">4l20s20s20s")
188 indexformatv0 = struct.Struct(">4l20s20s20s")
189 indexformatv0_pack = indexformatv0.pack
189 indexformatv0_pack = indexformatv0.pack
190 indexformatv0_unpack = indexformatv0.unpack
190 indexformatv0_unpack = indexformatv0.unpack
191
191
192 class revlogoldindex(list):
192 class revlogoldindex(list):
193 def __getitem__(self, i):
193 def __getitem__(self, i):
194 if i == -1:
194 if i == -1:
195 return (0, 0, 0, -1, -1, -1, -1, nullid)
195 return (0, 0, 0, -1, -1, -1, -1, nullid)
196 return list.__getitem__(self, i)
196 return list.__getitem__(self, i)
197
197
198 class revlogoldio(object):
198 class revlogoldio(object):
199 def __init__(self):
199 def __init__(self):
200 self.size = indexformatv0.size
200 self.size = indexformatv0.size
201
201
202 def parseindex(self, data, inline):
202 def parseindex(self, data, inline):
203 s = self.size
203 s = self.size
204 index = []
204 index = []
205 nodemap = {nullid: nullrev}
205 nodemap = {nullid: nullrev}
206 n = off = 0
206 n = off = 0
207 l = len(data)
207 l = len(data)
208 while off + s <= l:
208 while off + s <= l:
209 cur = data[off:off + s]
209 cur = data[off:off + s]
210 off += s
210 off += s
211 e = indexformatv0_unpack(cur)
211 e = indexformatv0_unpack(cur)
212 # transform to revlogv1 format
212 # transform to revlogv1 format
213 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
213 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
214 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
214 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
215 index.append(e2)
215 index.append(e2)
216 nodemap[e[6]] = n
216 nodemap[e[6]] = n
217 n += 1
217 n += 1
218
218
219 return revlogoldindex(index), nodemap, None
219 return revlogoldindex(index), nodemap, None
220
220
221 def packentry(self, entry, node, version, rev):
221 def packentry(self, entry, node, version, rev):
222 if gettype(entry[0]):
222 if gettype(entry[0]):
223 raise error.RevlogError(_('index entry flags need revlog '
223 raise error.RevlogError(_('index entry flags need revlog '
224 'version 1'))
224 'version 1'))
225 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
225 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
226 node(entry[5]), node(entry[6]), entry[7])
226 node(entry[5]), node(entry[6]), entry[7])
227 return indexformatv0_pack(*e2)
227 return indexformatv0_pack(*e2)
228
228
229 # index ng:
229 # index ng:
230 # 6 bytes: offset
230 # 6 bytes: offset
231 # 2 bytes: flags
231 # 2 bytes: flags
232 # 4 bytes: compressed length
232 # 4 bytes: compressed length
233 # 4 bytes: uncompressed length
233 # 4 bytes: uncompressed length
234 # 4 bytes: base rev
234 # 4 bytes: base rev
235 # 4 bytes: link rev
235 # 4 bytes: link rev
236 # 4 bytes: parent 1 rev
236 # 4 bytes: parent 1 rev
237 # 4 bytes: parent 2 rev
237 # 4 bytes: parent 2 rev
238 # 32 bytes: nodeid
238 # 32 bytes: nodeid
239 indexformatng = struct.Struct(">Qiiiiii20s12x")
239 indexformatng = struct.Struct(">Qiiiiii20s12x")
240 indexformatng_pack = indexformatng.pack
240 indexformatng_pack = indexformatng.pack
241 versionformat = struct.Struct(">I")
241 versionformat = struct.Struct(">I")
242 versionformat_pack = versionformat.pack
242 versionformat_pack = versionformat.pack
243 versionformat_unpack = versionformat.unpack
243 versionformat_unpack = versionformat.unpack
244
244
245 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
245 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
246 # signed integer)
246 # signed integer)
247 _maxentrysize = 0x7fffffff
247 _maxentrysize = 0x7fffffff
248
248
249 class revlogio(object):
249 class revlogio(object):
250 def __init__(self):
250 def __init__(self):
251 self.size = indexformatng.size
251 self.size = indexformatng.size
252
252
253 def parseindex(self, data, inline):
253 def parseindex(self, data, inline):
254 # call the C implementation to parse the index data
254 # call the C implementation to parse the index data
255 index, cache = parsers.parse_index2(data, inline)
255 index, cache = parsers.parse_index2(data, inline)
256 return index, getattr(index, 'nodemap', None), cache
256 return index, getattr(index, 'nodemap', None), cache
257
257
258 def packentry(self, entry, node, version, rev):
258 def packentry(self, entry, node, version, rev):
259 p = indexformatng_pack(*entry)
259 p = indexformatng_pack(*entry)
260 if rev == 0:
260 if rev == 0:
261 p = versionformat_pack(version) + p[4:]
261 p = versionformat_pack(version) + p[4:]
262 return p
262 return p
263
263
264 class revlog(flagutil.flagprocessorsmixin):
264 class revlog(flagutil.flagprocessorsmixin):
265 """
265 """
266 the underlying revision storage object
266 the underlying revision storage object
267
267
268 A revlog consists of two parts, an index and the revision data.
268 A revlog consists of two parts, an index and the revision data.
269
269
270 The index is a file with a fixed record size containing
270 The index is a file with a fixed record size containing
271 information on each revision, including its nodeid (hash), the
271 information on each revision, including its nodeid (hash), the
272 nodeids of its parents, the position and offset of its data within
272 nodeids of its parents, the position and offset of its data within
273 the data file, and the revision it's based on. Finally, each entry
273 the data file, and the revision it's based on. Finally, each entry
274 contains a linkrev entry that can serve as a pointer to external
274 contains a linkrev entry that can serve as a pointer to external
275 data.
275 data.
276
276
277 The revision data itself is a linear collection of data chunks.
277 The revision data itself is a linear collection of data chunks.
278 Each chunk represents a revision and is usually represented as a
278 Each chunk represents a revision and is usually represented as a
279 delta against the previous chunk. To bound lookup time, runs of
279 delta against the previous chunk. To bound lookup time, runs of
280 deltas are limited to about 2 times the length of the original
280 deltas are limited to about 2 times the length of the original
281 version data. This makes retrieval of a version proportional to
281 version data. This makes retrieval of a version proportional to
282 its size, or O(1) relative to the number of revisions.
282 its size, or O(1) relative to the number of revisions.
283
283
284 Both pieces of the revlog are written to in an append-only
284 Both pieces of the revlog are written to in an append-only
285 fashion, which means we never need to rewrite a file to insert or
285 fashion, which means we never need to rewrite a file to insert or
286 remove data, and can use some simple techniques to avoid the need
286 remove data, and can use some simple techniques to avoid the need
287 for locking while reading.
287 for locking while reading.
288
288
289 If checkambig, indexfile is opened with checkambig=True at
289 If checkambig, indexfile is opened with checkambig=True at
290 writing, to avoid file stat ambiguity.
290 writing, to avoid file stat ambiguity.
291
291
292 If mmaplargeindex is True, and an mmapindexthreshold is set, the
292 If mmaplargeindex is True, and an mmapindexthreshold is set, the
293 index will be mmapped rather than read if it is larger than the
293 index will be mmapped rather than read if it is larger than the
294 configured threshold.
294 configured threshold.
295
295
296 If censorable is True, the revlog can have censored revisions.
296 If censorable is True, the revlog can have censored revisions.
297
297
298 If `upperboundcomp` is not None, this is the expected maximal gain from
298 If `upperboundcomp` is not None, this is the expected maximal gain from
299 compression for the data content.
299 compression for the data content.
300 """
300 """
301 def __init__(self, opener, indexfile, datafile=None, checkambig=False,
301 def __init__(self, opener, indexfile, datafile=None, checkambig=False,
302 mmaplargeindex=False, censorable=False,
302 mmaplargeindex=False, censorable=False,
303 upperboundcomp=None):
303 upperboundcomp=None):
304 """
304 """
305 create a revlog object
305 create a revlog object
306
306
307 opener is a function that abstracts the file opening operation
307 opener is a function that abstracts the file opening operation
308 and can be used to implement COW semantics or the like.
308 and can be used to implement COW semantics or the like.
309
309
310 """
310 """
311 self.upperboundcomp = upperboundcomp
311 self.upperboundcomp = upperboundcomp
312 self.indexfile = indexfile
312 self.indexfile = indexfile
313 self.datafile = datafile or (indexfile[:-2] + ".d")
313 self.datafile = datafile or (indexfile[:-2] + ".d")
314 self.opener = opener
314 self.opener = opener
315 # When True, indexfile is opened with checkambig=True at writing, to
315 # When True, indexfile is opened with checkambig=True at writing, to
316 # avoid file stat ambiguity.
316 # avoid file stat ambiguity.
317 self._checkambig = checkambig
317 self._checkambig = checkambig
318 self._mmaplargeindex = mmaplargeindex
318 self._mmaplargeindex = mmaplargeindex
319 self._censorable = censorable
319 self._censorable = censorable
320 # 3-tuple of (node, rev, text) for a raw revision.
320 # 3-tuple of (node, rev, text) for a raw revision.
321 self._revisioncache = None
321 self._revisioncache = None
322 # Maps rev to chain base rev.
322 # Maps rev to chain base rev.
323 self._chainbasecache = util.lrucachedict(100)
323 self._chainbasecache = util.lrucachedict(100)
324 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
324 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
325 self._chunkcache = (0, '')
325 self._chunkcache = (0, '')
326 # How much data to read and cache into the raw revlog data cache.
326 # How much data to read and cache into the raw revlog data cache.
327 self._chunkcachesize = 65536
327 self._chunkcachesize = 65536
328 self._maxchainlen = None
328 self._maxchainlen = None
329 self._deltabothparents = True
329 self._deltabothparents = True
330 self.index = []
330 self.index = []
331 # Mapping of partial identifiers to full nodes.
331 # Mapping of partial identifiers to full nodes.
332 self._pcache = {}
332 self._pcache = {}
333 # Mapping of revision integer to full node.
333 # Mapping of revision integer to full node.
334 self._nodecache = {nullid: nullrev}
334 self._nodecache = {nullid: nullrev}
335 self._nodepos = None
335 self._nodepos = None
336 self._compengine = 'zlib'
336 self._compengine = 'zlib'
337 self._compengineopts = {}
337 self._compengineopts = {}
338 self._maxdeltachainspan = -1
338 self._maxdeltachainspan = -1
339 self._withsparseread = False
339 self._withsparseread = False
340 self._sparserevlog = False
340 self._sparserevlog = False
341 self._srdensitythreshold = 0.50
341 self._srdensitythreshold = 0.50
342 self._srmingapsize = 262144
342 self._srmingapsize = 262144
343
343
344 # Make copy of flag processors so each revlog instance can support
344 # Make copy of flag processors so each revlog instance can support
345 # custom flags.
345 # custom flags.
346 self._flagprocessors = dict(flagutil.flagprocessors)
346 self._flagprocessors = dict(flagutil.flagprocessors)
347
347
348 # 2-tuple of file handles being used for active writing.
348 # 2-tuple of file handles being used for active writing.
349 self._writinghandles = None
349 self._writinghandles = None
350
350
351 self._loadindex()
351 self._loadindex()
352
352
353 def _loadindex(self):
353 def _loadindex(self):
354 mmapindexthreshold = None
354 mmapindexthreshold = None
355 opts = getattr(self.opener, 'options', {}) or {}
355 opts = getattr(self.opener, 'options', {}) or {}
356
356
357 if 'revlogv2' in opts:
357 if 'revlogv2' in opts:
358 newversionflags = REVLOGV2 | FLAG_INLINE_DATA
358 newversionflags = REVLOGV2 | FLAG_INLINE_DATA
359 elif 'revlogv1' in opts:
359 elif 'revlogv1' in opts:
360 newversionflags = REVLOGV1 | FLAG_INLINE_DATA
360 newversionflags = REVLOGV1 | FLAG_INLINE_DATA
361 if 'generaldelta' in opts:
361 if 'generaldelta' in opts:
362 newversionflags |= FLAG_GENERALDELTA
362 newversionflags |= FLAG_GENERALDELTA
363 elif getattr(self.opener, 'options', None) is not None:
363 elif getattr(self.opener, 'options', None) is not None:
364 # If options provided but no 'revlog*' found, the repository
364 # If options provided but no 'revlog*' found, the repository
365 # would have no 'requires' file in it, which means we have to
365 # would have no 'requires' file in it, which means we have to
366 # stick to the old format.
366 # stick to the old format.
367 newversionflags = REVLOGV0
367 newversionflags = REVLOGV0
368 else:
368 else:
369 newversionflags = REVLOG_DEFAULT_VERSION
369 newversionflags = REVLOG_DEFAULT_VERSION
370
370
371 if 'chunkcachesize' in opts:
371 if 'chunkcachesize' in opts:
372 self._chunkcachesize = opts['chunkcachesize']
372 self._chunkcachesize = opts['chunkcachesize']
373 if 'maxchainlen' in opts:
373 if 'maxchainlen' in opts:
374 self._maxchainlen = opts['maxchainlen']
374 self._maxchainlen = opts['maxchainlen']
375 if 'deltabothparents' in opts:
375 if 'deltabothparents' in opts:
376 self._deltabothparents = opts['deltabothparents']
376 self._deltabothparents = opts['deltabothparents']
377 self._lazydelta = bool(opts.get('lazydelta', True))
377 self._lazydelta = bool(opts.get('lazydelta', True))
378 self._lazydeltabase = False
378 self._lazydeltabase = False
379 if self._lazydelta:
379 if self._lazydelta:
380 self._lazydeltabase = bool(opts.get('lazydeltabase', False))
380 self._lazydeltabase = bool(opts.get('lazydeltabase', False))
381 if 'compengine' in opts:
381 if 'compengine' in opts:
382 self._compengine = opts['compengine']
382 self._compengine = opts['compengine']
383 if 'zlib.level' in opts:
383 if 'zlib.level' in opts:
384 self._compengineopts['zlib.level'] = opts['zlib.level']
384 self._compengineopts['zlib.level'] = opts['zlib.level']
385 if 'zstd.level' in opts:
385 if 'zstd.level' in opts:
386 self._compengineopts['zstd.level'] = opts['zstd.level']
386 self._compengineopts['zstd.level'] = opts['zstd.level']
387 if 'maxdeltachainspan' in opts:
387 if 'maxdeltachainspan' in opts:
388 self._maxdeltachainspan = opts['maxdeltachainspan']
388 self._maxdeltachainspan = opts['maxdeltachainspan']
389 if self._mmaplargeindex and 'mmapindexthreshold' in opts:
389 if self._mmaplargeindex and 'mmapindexthreshold' in opts:
390 mmapindexthreshold = opts['mmapindexthreshold']
390 mmapindexthreshold = opts['mmapindexthreshold']
391 self._sparserevlog = bool(opts.get('sparse-revlog', False))
391 self._sparserevlog = bool(opts.get('sparse-revlog', False))
392 withsparseread = bool(opts.get('with-sparse-read', False))
392 withsparseread = bool(opts.get('with-sparse-read', False))
393 # sparse-revlog forces sparse-read
393 # sparse-revlog forces sparse-read
394 self._withsparseread = self._sparserevlog or withsparseread
394 self._withsparseread = self._sparserevlog or withsparseread
395 if 'sparse-read-density-threshold' in opts:
395 if 'sparse-read-density-threshold' in opts:
396 self._srdensitythreshold = opts['sparse-read-density-threshold']
396 self._srdensitythreshold = opts['sparse-read-density-threshold']
397 if 'sparse-read-min-gap-size' in opts:
397 if 'sparse-read-min-gap-size' in opts:
398 self._srmingapsize = opts['sparse-read-min-gap-size']
398 self._srmingapsize = opts['sparse-read-min-gap-size']
399 if opts.get('enableellipsis'):
399 if opts.get('enableellipsis'):
400 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
400 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
401
401
402 # revlog v0 doesn't have flag processors
402 # revlog v0 doesn't have flag processors
403 for flag, processor in opts.get(b'flagprocessors', {}).iteritems():
403 for flag, processor in opts.get(b'flagprocessors', {}).iteritems():
404 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
404 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
405
405
406 if self._chunkcachesize <= 0:
406 if self._chunkcachesize <= 0:
407 raise error.RevlogError(_('revlog chunk cache size %r is not '
407 raise error.RevlogError(_('revlog chunk cache size %r is not '
408 'greater than 0') % self._chunkcachesize)
408 'greater than 0') % self._chunkcachesize)
409 elif self._chunkcachesize & (self._chunkcachesize - 1):
409 elif self._chunkcachesize & (self._chunkcachesize - 1):
410 raise error.RevlogError(_('revlog chunk cache size %r is not a '
410 raise error.RevlogError(_('revlog chunk cache size %r is not a '
411 'power of 2') % self._chunkcachesize)
411 'power of 2') % self._chunkcachesize)
412
412
413 indexdata = ''
413 indexdata = ''
414 self._initempty = True
414 self._initempty = True
415 try:
415 try:
416 with self._indexfp() as f:
416 with self._indexfp() as f:
417 if (mmapindexthreshold is not None and
417 if (mmapindexthreshold is not None and
418 self.opener.fstat(f).st_size >= mmapindexthreshold):
418 self.opener.fstat(f).st_size >= mmapindexthreshold):
419 # TODO: should .close() to release resources without
419 # TODO: should .close() to release resources without
420 # relying on Python GC
420 # relying on Python GC
421 indexdata = util.buffer(util.mmapread(f))
421 indexdata = util.buffer(util.mmapread(f))
422 else:
422 else:
423 indexdata = f.read()
423 indexdata = f.read()
424 if len(indexdata) > 0:
424 if len(indexdata) > 0:
425 versionflags = versionformat_unpack(indexdata[:4])[0]
425 versionflags = versionformat_unpack(indexdata[:4])[0]
426 self._initempty = False
426 self._initempty = False
427 else:
428 versionflags = newversionflags
429 except IOError as inst:
430 if inst.errno != errno.ENOENT:
431 raise
432
433 versionflags = newversionflags
434
435 self.version = versionflags
436
437 flags = versionflags & ~0xFFFF
438 fmt = versionflags & 0xFFFF
439
440 if fmt == REVLOGV0:
441 if flags:
442 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
443 'revlog %s') %
444 (flags >> 16, fmt, self.indexfile))
445
446 self._inline = False
447 self._generaldelta = False
448
449 elif fmt == REVLOGV1:
450 if flags & ~REVLOGV1_FLAGS:
451 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
452 'revlog %s') %
453 (flags >> 16, fmt, self.indexfile))
454
455 self._inline = versionflags & FLAG_INLINE_DATA
456 self._generaldelta = versionflags & FLAG_GENERALDELTA
457
458 elif fmt == REVLOGV2:
459 if flags & ~REVLOGV2_FLAGS:
460 raise error.RevlogError(_('unknown flags (%#04x) in version %d '
461 'revlog %s') %
462 (flags >> 16, fmt, self.indexfile))
463
464 self._inline = versionflags & FLAG_INLINE_DATA
465 # generaldelta implied by version 2 revlogs.
466 self._generaldelta = True
467
468 else:
469 raise error.RevlogError(_('unknown version (%d) in revlog %s') %
470 (fmt, self.indexfile))
471 # sparse-revlog can't be on without general-delta (issue6056)
472 if not self._generaldelta:
473 self._sparserevlog = False
474
475 self._storedeltachains = True
476
477 self._io = revlogio()
478 if self.version == REVLOGV0:
479 self._io = revlogoldio()
480 try:
481 d = self._io.parseindex(indexdata, self._inline)
482 except (ValueError, IndexError):
483 raise error.RevlogError(_("index %s is corrupted") %
484 self.indexfile)
485 self.index, nodemap, self._chunkcache = d
486 if nodemap is not None:
487 self.nodemap = self._nodecache = nodemap
488 if not self._chunkcache:
489 self._chunkclear()
490 # revnum -> (chain-length, sum-delta-length)
491 self._chaininfocache = {}
492 # revlog header -> revlog compressor
493 self._decompressors = {}
494
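# ---- illustrative sketch (editorial addition, not part of revlog.py) --------
# The header word handled above packs the on-disk revlog format into the low
# 16 bits and feature flags into the high bits; FLAG_INLINE_DATA and
# FLAG_GENERALDELTA are the usual high-bit flags.  A minimal standalone
# version of that split, with the constant values assumed for the example:
REVLOGV1 = 1                  # assumed value of the version constant
FLAG_INLINE_DATA = 1 << 16    # assumed value of the inline-data flag
def splitheader(versionflags):
    """Return (flags, fmt) from a packed revlog header word."""
    return versionflags & ~0xFFFF, versionflags & 0xFFFF
assert splitheader(FLAG_INLINE_DATA | REVLOGV1) == (FLAG_INLINE_DATA, REVLOGV1)
# ------------------------------------------------------------------------------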
495 @util.propertycache
496 def _compressor(self):
497 engine = util.compengines[self._compengine]
498 return engine.revlogcompressor(self._compengineopts)
499
500 def _indexfp(self, mode='r'):
501 """file object for the revlog's index file"""
502 args = {r'mode': mode}
503 if mode != 'r':
504 args[r'checkambig'] = self._checkambig
505 if mode == 'w':
506 args[r'atomictemp'] = True
507 return self.opener(self.indexfile, **args)
508
509 def _datafp(self, mode='r'):
510 """file object for the revlog's data file"""
511 return self.opener(self.datafile, mode=mode)
512
513 @contextlib.contextmanager
514 def _datareadfp(self, existingfp=None):
515 """file object suitable to read data"""
516 # Use explicit file handle, if given.
517 if existingfp is not None:
518 yield existingfp
519
520 # Use a file handle being actively used for writes, if available.
521 # There is some danger to doing this because reads will seek the
522 # file. However, _writeentry() performs a SEEK_END before all writes,
523 # so we should be safe.
524 elif self._writinghandles:
525 if self._inline:
526 yield self._writinghandles[0]
527 else:
528 yield self._writinghandles[1]
529
530 # Otherwise open a new file handle.
531 else:
532 if self._inline:
533 func = self._indexfp
534 else:
535 func = self._datafp
536 with func() as fp:
537 yield fp
538
539 def tip(self):
540 return self.node(len(self.index) - 1)
541 def __contains__(self, rev):
542 return 0 <= rev < len(self)
543 def __len__(self):
544 return len(self.index)
545 def __iter__(self):
546 return iter(pycompat.xrange(len(self)))
547 def revs(self, start=0, stop=None):
548 """iterate over all rev in this revlog (from start to stop)"""
549 return storageutil.iterrevs(len(self), start=start, stop=stop)
550
551 @util.propertycache
552 def nodemap(self):
553 if self.index:
554 # populate mapping down to the initial node
555 node0 = self.index[0][7] # get around changelog filtering
556 self.rev(node0)
557 return self._nodecache
558
559 def hasnode(self, node):
560 try:
561 self.rev(node)
562 return True
563 except KeyError:
564 return False
565
566 def candelta(self, baserev, rev):
567 """whether two revisions (baserev, rev) can be delta-ed or not"""
568 # Disable delta if either rev requires a content-changing flag
569 # processor (ex. LFS). This is because such flag processor can alter
570 # the rawtext content that the delta will be based on, and two clients
571 # could have a same revlog node with different flags (i.e. different
572 # rawtext contents) and the delta could be incompatible.
573 if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS)
574 or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)):
575 return False
576 return True
577
578 def clearcaches(self):
579 self._revisioncache = None
580 self._chainbasecache.clear()
581 self._chunkcache = (0, '')
582 self._pcache = {}
583
584 try:
585 # If we are using the native C version, you are in a fun case
586 # where self.index, self.nodemap and self._nodecaches is the same
587 # object.
588 self._nodecache.clearcaches()
589 except AttributeError:
590 self._nodecache = {nullid: nullrev}
591 self._nodepos = None
592
593 def rev(self, node):
594 try:
595 return self._nodecache[node]
596 except TypeError:
597 raise
598 except error.RevlogError:
599 # parsers.c radix tree lookup failed
600 if node == wdirid or node in wdirfilenodeids:
601 raise error.WdirUnsupported
602 raise error.LookupError(node, self.indexfile, _('no node'))
603 except KeyError:
604 # pure python cache lookup failed
605 n = self._nodecache
606 i = self.index
607 p = self._nodepos
608 if p is None:
609 p = len(i) - 1
610 else:
611 assert p < len(i)
612 for r in pycompat.xrange(p, -1, -1):
613 v = i[r][7]
614 n[v] = r
615 if v == node:
616 self._nodepos = r - 1
617 return r
618 if node == wdirid or node in wdirfilenodeids:
619 raise error.WdirUnsupported
620 raise error.LookupError(node, self.indexfile, _('no node'))
621
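# ---- illustrative sketch (editorial addition, not part of revlog.py) --------
# The pure-python branch of rev() above scans the index backwards from the
# position where the previous scan stopped, memoizing every node it passes.
# The same idea in isolation, over a toy index whose entries keep the node in
# slot 7 (all names here are assumptions made for the example):
def findrev(index, node, nodecache, nodepos=None):
    start = len(index) - 1 if nodepos is None else nodepos
    for r in range(start, -1, -1):
        v = index[r][7]
        nodecache[v] = r          # memoize nodes seen on the way
        if v == node:
            return r, r - 1       # (rev, position for the next scan)
    raise KeyError(node)
# ------------------------------------------------------------------------------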
622 # Accessors for index entries.
623
624 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
625 # are flags.
626 def start(self, rev):
627 return int(self.index[rev][0] >> 16)
628
629 def flags(self, rev):
630 return self.index[rev][0] & 0xFFFF
631
632 def length(self, rev):
633 return self.index[rev][1]
634
635 def rawsize(self, rev):
636 """return the length of the uncompressed text for a given revision"""
637 l = self.index[rev][2]
638 if l >= 0:
639 return l
640
641 t = self.rawdata(rev)
642 return len(t)
643
644 def size(self, rev):
645 """length of non-raw text (processed by a "read" flag processor)"""
646 # fast path: if no "read" flag processor could change the content,
647 # size is rawsize. note: ELLIPSIS is known to not change the content.
648 flags = self.flags(rev)
649 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
650 return self.rawsize(rev)
651
652 return len(self.revision(rev, raw=False))
653
654 def chainbase(self, rev):
655 base = self._chainbasecache.get(rev)
656 if base is not None:
657 return base
658
659 index = self.index
660 iterrev = rev
661 base = index[iterrev][3]
662 while base != iterrev:
663 iterrev = base
664 base = index[iterrev][3]
665
666 self._chainbasecache[rev] = base
667 return base
668
669 def linkrev(self, rev):
670 return self.index[rev][4]
671
672 def parentrevs(self, rev):
673 try:
674 entry = self.index[rev]
675 except IndexError:
676 if rev == wdirrev:
677 raise error.WdirUnsupported
678 raise
679
680 return entry[5], entry[6]
681
682 # fast parentrevs(rev) where rev isn't filtered
683 _uncheckedparentrevs = parentrevs
684
685 def node(self, rev):
686 try:
687 return self.index[rev][7]
688 except IndexError:
689 if rev == wdirrev:
690 raise error.WdirUnsupported
691 raise
692
693 # Derived from index values.
694
695 def end(self, rev):
696 return self.start(rev) + self.length(rev)
697
698 def parents(self, node):
699 i = self.index
700 d = i[self.rev(node)]
701 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
702
703 def chainlen(self, rev):
704 return self._chaininfo(rev)[0]
705
706 def _chaininfo(self, rev):
707 chaininfocache = self._chaininfocache
708 if rev in chaininfocache:
709 return chaininfocache[rev]
710 index = self.index
711 generaldelta = self._generaldelta
712 iterrev = rev
713 e = index[iterrev]
714 clen = 0
715 compresseddeltalen = 0
716 while iterrev != e[3]:
717 clen += 1
718 compresseddeltalen += e[1]
719 if generaldelta:
720 iterrev = e[3]
721 else:
722 iterrev -= 1
723 if iterrev in chaininfocache:
724 t = chaininfocache[iterrev]
725 clen += t[0]
726 compresseddeltalen += t[1]
727 break
728 e = index[iterrev]
729 else:
730 # Add text length of base since decompressing that also takes
731 # work. For cache hits the length is already included.
732 compresseddeltalen += e[1]
733 r = (clen, compresseddeltalen)
734 chaininfocache[rev] = r
735 return r
736
737 def _deltachain(self, rev, stoprev=None):
738 """Obtain the delta chain for a revision.
739
740 ``stoprev`` specifies a revision to stop at. If not specified, we
741 stop at the base of the chain.
742
743 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
744 revs in ascending order and ``stopped`` is a bool indicating whether
745 ``stoprev`` was hit.
746 """
747 # Try C implementation.
748 try:
749 return self.index.deltachain(rev, stoprev, self._generaldelta)
750 except AttributeError:
751 pass
752
753 chain = []
754
755 # Alias to prevent attribute lookup in tight loop.
756 index = self.index
757 generaldelta = self._generaldelta
758
759 iterrev = rev
760 e = index[iterrev]
761 while iterrev != e[3] and iterrev != stoprev:
762 chain.append(iterrev)
763 if generaldelta:
764 iterrev = e[3]
765 else:
766 iterrev -= 1
767 e = index[iterrev]
768
769 if iterrev == stoprev:
770 stopped = True
771 else:
772 chain.append(iterrev)
773 stopped = False
774
775 chain.reverse()
776 return chain, stopped
777
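# ---- illustrative sketch (editorial addition, not part of revlog.py) --------
# The pure-python branch of _deltachain() walks delta-base pointers (or just
# rev-1 for non-generaldelta revlogs) until it reaches a revision that is its
# own base, i.e. a full snapshot.  A self-contained equivalent over a mapping
# rev -> delta base (the mapping is an assumption made for the example):
def deltachain(bases, rev):
    chain = []
    while bases[rev] != rev:      # a snapshot is its own base
        chain.append(rev)
        rev = bases[rev]
    chain.append(rev)
    chain.reverse()               # oldest (the snapshot) first
    return chain
assert deltachain({0: 0, 1: 0, 2: 1}, 2) == [0, 1, 2]
# ------------------------------------------------------------------------------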
778 def ancestors(self, revs, stoprev=0, inclusive=False):
779 """Generate the ancestors of 'revs' in reverse revision order.
780 Does not generate revs lower than stoprev.
781
782 See the documentation for ancestor.lazyancestors for more details."""
783
784 # first, make sure start revisions aren't filtered
785 revs = list(revs)
786 checkrev = self.node
787 for r in revs:
788 checkrev(r)
789 # and we're sure ancestors aren't filtered as well
790
791 if rustancestor is not None:
792 lazyancestors = rustancestor.LazyAncestors
793 arg = self.index
794 elif util.safehasattr(parsers, 'rustlazyancestors'):
795 lazyancestors = ancestor.rustlazyancestors
796 arg = self.index
797 else:
798 lazyancestors = ancestor.lazyancestors
799 arg = self._uncheckedparentrevs
800 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
801
802 def descendants(self, revs):
803 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
804
805 def findcommonmissing(self, common=None, heads=None):
806 """Return a tuple of the ancestors of common and the ancestors of heads
807 that are not ancestors of common. In revset terminology, we return the
808 tuple:
809
810 ::common, (::heads) - (::common)
811
812 The list is sorted by revision number, meaning it is
813 topologically sorted.
814
815 'heads' and 'common' are both lists of node IDs. If heads is
816 not supplied, uses all of the revlog's heads. If common is not
817 supplied, uses nullid."""
818 if common is None:
819 common = [nullid]
820 if heads is None:
821 heads = self.heads()
822
823 common = [self.rev(n) for n in common]
824 heads = [self.rev(n) for n in heads]
825
826 # we want the ancestors, but inclusive
827 class lazyset(object):
828 def __init__(self, lazyvalues):
829 self.addedvalues = set()
830 self.lazyvalues = lazyvalues
831
832 def __contains__(self, value):
833 return value in self.addedvalues or value in self.lazyvalues
834
835 def __iter__(self):
836 added = self.addedvalues
837 for r in added:
838 yield r
839 for r in self.lazyvalues:
840 if not r in added:
841 yield r
842
843 def add(self, value):
844 self.addedvalues.add(value)
845
846 def update(self, values):
847 self.addedvalues.update(values)
848
849 has = lazyset(self.ancestors(common))
850 has.add(nullrev)
851 has.update(common)
852
853 # take all ancestors from heads that aren't in has
854 missing = set()
855 visit = collections.deque(r for r in heads if r not in has)
856 while visit:
857 r = visit.popleft()
858 if r in missing:
859 continue
860 else:
861 missing.add(r)
862 for p in self.parentrevs(r):
863 if p not in has:
864 visit.append(p)
865 missing = list(missing)
866 missing.sort()
867 return has, [self.node(miss) for miss in missing]
868
869 def incrementalmissingrevs(self, common=None):
870 """Return an object that can be used to incrementally compute the
871 revision numbers of the ancestors of arbitrary sets that are not
872 ancestors of common. This is an ancestor.incrementalmissingancestors
873 object.
874
875 'common' is a list of revision numbers. If common is not supplied, uses
876 nullrev.
877 """
878 if common is None:
879 common = [nullrev]
880
881 if rustancestor is not None:
882 return rustancestor.MissingAncestors(self.index, common)
883 return ancestor.incrementalmissingancestors(self.parentrevs, common)
884
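# ---- illustrative sketch (editorial addition, not part of revlog.py) --------
# incrementalmissingrevs() hands back an object whose missingancestors() can
# be called several times while reusing previously computed state, which is
# what findmissingrevs()/findmissing() below rely on.  Hedged usage sketch,
# assuming 'rl' is an open revlog and the revision numbers exist in it:
#
#     inc = rl.incrementalmissingrevs(common=[0])
#     batch1 = inc.missingancestors([5])   # ::5 - ::0
#     batch2 = inc.missingancestors([9])   # reuses the work done for batch1
# ------------------------------------------------------------------------------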
885 def findmissingrevs(self, common=None, heads=None):
886 """Return the revision numbers of the ancestors of heads that
887 are not ancestors of common.
888
889 More specifically, return a list of revision numbers corresponding to
890 nodes N such that every N satisfies the following constraints:
891
892 1. N is an ancestor of some node in 'heads'
893 2. N is not an ancestor of any node in 'common'
894
895 The list is sorted by revision number, meaning it is
896 topologically sorted.
897
898 'heads' and 'common' are both lists of revision numbers. If heads is
899 not supplied, uses all of the revlog's heads. If common is not
900 supplied, uses nullid."""
901 if common is None:
902 common = [nullrev]
903 if heads is None:
904 heads = self.headrevs()
905
906 inc = self.incrementalmissingrevs(common=common)
907 return inc.missingancestors(heads)
908
909 def findmissing(self, common=None, heads=None):
910 """Return the ancestors of heads that are not ancestors of common.
911
912 More specifically, return a list of nodes N such that every N
913 satisfies the following constraints:
914
915 1. N is an ancestor of some node in 'heads'
916 2. N is not an ancestor of any node in 'common'
917
918 The list is sorted by revision number, meaning it is
919 topologically sorted.
920
921 'heads' and 'common' are both lists of node IDs. If heads is
922 not supplied, uses all of the revlog's heads. If common is not
923 supplied, uses nullid."""
924 if common is None:
925 common = [nullid]
926 if heads is None:
927 heads = self.heads()
928
929 common = [self.rev(n) for n in common]
930 heads = [self.rev(n) for n in heads]
931
932 inc = self.incrementalmissingrevs(common=common)
933 return [self.node(r) for r in inc.missingancestors(heads)]
934
935 def nodesbetween(self, roots=None, heads=None):
936 """Return a topological path from 'roots' to 'heads'.
937
938 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
939 topologically sorted list of all nodes N that satisfy both of
940 these constraints:
941
942 1. N is a descendant of some node in 'roots'
943 2. N is an ancestor of some node in 'heads'
944
945 Every node is considered to be both a descendant and an ancestor
946 of itself, so every reachable node in 'roots' and 'heads' will be
947 included in 'nodes'.
948
949 'outroots' is the list of reachable nodes in 'roots', i.e., the
950 subset of 'roots' that is returned in 'nodes'. Likewise,
951 'outheads' is the subset of 'heads' that is also in 'nodes'.
952
953 'roots' and 'heads' are both lists of node IDs. If 'roots' is
954 unspecified, uses nullid as the only root. If 'heads' is
955 unspecified, uses list of all of the revlog's heads."""
956 nonodes = ([], [], [])
957 if roots is not None:
958 roots = list(roots)
959 if not roots:
960 return nonodes
961 lowestrev = min([self.rev(n) for n in roots])
962 else:
963 roots = [nullid] # Everybody's a descendant of nullid
964 lowestrev = nullrev
965 if (lowestrev == nullrev) and (heads is None):
966 # We want _all_ the nodes!
967 return ([self.node(r) for r in self], [nullid], list(self.heads()))
968 if heads is None:
969 # All nodes are ancestors, so the latest ancestor is the last
970 # node.
971 highestrev = len(self) - 1
972 # Set ancestors to None to signal that every node is an ancestor.
973 ancestors = None
974 # Set heads to an empty dictionary for later discovery of heads
975 heads = {}
976 else:
977 heads = list(heads)
978 if not heads:
979 return nonodes
980 ancestors = set()
981 # Turn heads into a dictionary so we can remove 'fake' heads.
982 # Also, later we will be using it to filter out the heads we can't
983 # find from roots.
984 heads = dict.fromkeys(heads, False)
985 # Start at the top and keep marking parents until we're done.
986 nodestotag = set(heads)
987 # Remember where the top was so we can use it as a limit later.
988 highestrev = max([self.rev(n) for n in nodestotag])
989 while nodestotag:
990 # grab a node to tag
991 n = nodestotag.pop()
992 # Never tag nullid
993 if n == nullid:
994 continue
995 # A node's revision number represents its place in a
996 # topologically sorted list of nodes.
997 r = self.rev(n)
998 if r >= lowestrev:
999 if n not in ancestors:
1000 # If we are possibly a descendant of one of the roots
1001 # and we haven't already been marked as an ancestor
1002 ancestors.add(n) # Mark as ancestor
1003 # Add non-nullid parents to list of nodes to tag.
1004 nodestotag.update([p for p in self.parents(n) if
1005 p != nullid])
1006 elif n in heads: # We've seen it before, is it a fake head?
1007 # So it is, real heads should not be the ancestors of
1008 # any other heads.
1009 heads.pop(n)
1010 if not ancestors:
1011 return nonodes
1012 # Now that we have our set of ancestors, we want to remove any
1013 # roots that are not ancestors.
1014
1015 # If one of the roots was nullid, everything is included anyway.
1016 if lowestrev > nullrev:
1017 # But, since we weren't, let's recompute the lowest rev to not
1018 # include roots that aren't ancestors.
1019
1020 # Filter out roots that aren't ancestors of heads
1021 roots = [root for root in roots if root in ancestors]
1022 # Recompute the lowest revision
1023 if roots:
1024 lowestrev = min([self.rev(root) for root in roots])
1025 else:
1026 # No more roots? Return empty list
1027 return nonodes
1028 else:
1029 # We are descending from nullid, and don't need to care about
1030 # any other roots.
1031 lowestrev = nullrev
1032 roots = [nullid]
1033 # Transform our roots list into a set.
1034 descendants = set(roots)
1035 # Also, keep the original roots so we can filter out roots that aren't
1036 # 'real' roots (i.e. are descended from other roots).
1037 roots = descendants.copy()
1038 # Our topologically sorted list of output nodes.
1039 orderedout = []
1040 # Don't start at nullid since we don't want nullid in our output list,
1041 # and if nullid shows up in descendants, empty parents will look like
1042 # they're descendants.
1043 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1044 n = self.node(r)
1045 isdescendant = False
1046 if lowestrev == nullrev: # Everybody is a descendant of nullid
1047 isdescendant = True
1048 elif n in descendants:
1049 # n is already a descendant
1050 isdescendant = True
1051 # This check only needs to be done here because all the roots
1052 # will start being marked as descendants before the loop.
1053 if n in roots:
1054 # If n was a root, check if it's a 'real' root.
1055 p = tuple(self.parents(n))
1056 # If any of its parents are descendants, it's not a root.
1057 if (p[0] in descendants) or (p[1] in descendants):
1058 roots.remove(n)
1059 else:
1060 p = tuple(self.parents(n))
1061 # A node is a descendant if either of its parents are
1062 # descendants. (We seeded the dependents list with the roots
1063 # up there, remember?)
1064 if (p[0] in descendants) or (p[1] in descendants):
1065 descendants.add(n)
1066 isdescendant = True
1067 if isdescendant and ((ancestors is None) or (n in ancestors)):
1068 # Only include nodes that are both descendants and ancestors.
1069 orderedout.append(n)
1070 if (ancestors is not None) and (n in heads):
1071 # We're trying to figure out which heads are reachable
1072 # from roots.
1073 # Mark this head as having been reached
1074 heads[n] = True
1075 elif ancestors is None:
1076 # Otherwise, we're trying to discover the heads.
1077 # Assume this is a head because if it isn't, the next step
1078 # will eventually remove it.
1079 heads[n] = True
1080 # But, obviously its parents aren't.
1081 for p in self.parents(n):
1082 heads.pop(p, None)
1083 heads = [head for head, flag in heads.iteritems() if flag]
1084 roots = list(roots)
1085 assert orderedout
1086 assert roots
1087 assert heads
1088 return (orderedout, roots, heads)
1089
1090 def headrevs(self, revs=None):
1091 if revs is None:
1092 try:
1093 return self.index.headrevs()
1094 except AttributeError:
1095 return self._headrevs()
1096 if rustdagop is not None:
1097 return rustdagop.headrevs(self.index, revs)
1098 return dagop.headrevs(revs, self._uncheckedparentrevs)
1099
1100 def computephases(self, roots):
1101 return self.index.computephasesmapsets(roots)
1102
1103 def _headrevs(self):
1104 count = len(self)
1105 if not count:
1106 return [nullrev]
1107 # we won't iter over filtered rev so nobody is a head at start
1108 ishead = [0] * (count + 1)
1109 index = self.index
1110 for r in self:
1111 ishead[r] = 1 # I may be a head
1112 e = index[r]
1113 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1114 return [r for r, val in enumerate(ishead) if val]
1115
1116 def heads(self, start=None, stop=None):
1117 """return the list of all nodes that have no children
1118
1119 if start is specified, only heads that are descendants of
1120 start will be returned
1121 if stop is specified, it will consider all the revs from stop
1122 as if they had no children
1123 """
1124 if start is None and stop is None:
1125 if not len(self):
1126 return [nullid]
1127 return [self.node(r) for r in self.headrevs()]
1128
1129 if start is None:
1130 start = nullrev
1131 else:
1132 start = self.rev(start)
1133
1134 stoprevs = set(self.rev(n) for n in stop or [])
1135
1136 revs = dagop.headrevssubset(self.revs, self.parentrevs, startrev=start,
1137 stoprevs=stoprevs)
1138
1139 return [self.node(rev) for rev in revs]
1140
1141 def children(self, node):
1142 """find the children of a given node"""
1143 c = []
1144 p = self.rev(node)
1145 for r in self.revs(start=p + 1):
1146 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1147 if prevs:
1148 for pr in prevs:
1149 if pr == p:
1150 c.append(self.node(r))
1151 elif p == nullrev:
1152 c.append(self.node(r))
1153 return c
1154
1155 def commonancestorsheads(self, a, b):
1156 """calculate all the heads of the common ancestors of nodes a and b"""
1157 a, b = self.rev(a), self.rev(b)
1158 ancs = self._commonancestorsheads(a, b)
1159 return pycompat.maplist(self.node, ancs)
1160
1161 def _commonancestorsheads(self, *revs):
1162 """calculate all the heads of the common ancestors of revs"""
1163 try:
1164 ancs = self.index.commonancestorsheads(*revs)
1165 except (AttributeError, OverflowError): # C implementation failed
1166 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1167 return ancs
1168
1169 def isancestor(self, a, b):
1170 """return True if node a is an ancestor of node b
1171
1172 A revision is considered an ancestor of itself."""
1173 a, b = self.rev(a), self.rev(b)
1174 return self.isancestorrev(a, b)
1175
1176 def isancestorrev(self, a, b):
1177 """return True if revision a is an ancestor of revision b
1178
1179 A revision is considered an ancestor of itself.
1180
1181 The implementation of this is trivial but the use of
1182 reachableroots is not."""
1183 if a == nullrev:
1184 return True
1185 elif a == b:
1186 return True
1187 elif a > b:
1188 return False
1189 return bool(self.reachableroots(a, [b], [a], includepath=False))
1190
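# ---- illustrative sketch (editorial addition, not part of revlog.py) --------
# isancestorrev() above leans on reachableroots(); the underlying question is
# simply "does walking b's ancestry, never going below a, reach a?".  A plain
# pure-python rendering of that question over a parentrevs-like callable
# (names here are assumptions made for the example):
def isancestorrev(parentrevs, a, b):
    if a == b:
        return True
    if a > b:                     # revision numbers grow topologically
        return False
    stack, seen = [b], set()
    while stack:
        r = stack.pop()
        if r == a:
            return True
        if r not in seen:
            seen.add(r)
            stack.extend(p for p in parentrevs(r) if p >= a)
    return False
# ------------------------------------------------------------------------------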
1191 def reachableroots(self, minroot, heads, roots, includepath=False):
1192 """return (heads(::<roots> and <roots>::<heads>))
1193
1194 If includepath is True, return (<roots>::<heads>)."""
1195 try:
1196 return self.index.reachableroots2(minroot, heads, roots,
1197 includepath)
1198 except AttributeError:
1199 return dagop._reachablerootspure(self.parentrevs,
1200 minroot, roots, heads, includepath)
1201
1202 def ancestor(self, a, b):
1203 """calculate the "best" common ancestor of nodes a and b"""
1204
1205 a, b = self.rev(a), self.rev(b)
1206 try:
1207 ancs = self.index.ancestors(a, b)
1208 except (AttributeError, OverflowError):
1209 ancs = ancestor.ancestors(self.parentrevs, a, b)
1210 if ancs:
1211 # choose a consistent winner when there's a tie
1212 return min(map(self.node, ancs))
1213 return nullid
1214
1215 def _match(self, id):
1216 if isinstance(id, int):
1217 # rev
1218 return self.node(id)
1219 if len(id) == 20:
1220 # possibly a binary node
1221 # odds of a binary node being all hex in ASCII are 1 in 10**25
1222 try:
1223 node = id
1224 self.rev(node) # quick search the index
1225 return node
1226 except error.LookupError:
1227 pass # may be partial hex id
1228 try:
1229 # str(rev)
1230 rev = int(id)
1231 if "%d" % rev != id:
1232 raise ValueError
1233 if rev < 0:
1234 rev = len(self) + rev
1235 if rev < 0 or rev >= len(self):
1236 raise ValueError
1237 return self.node(rev)
1238 except (ValueError, OverflowError):
1239 pass
1240 if len(id) == 40:
1241 try:
1242 # a full hex nodeid?
1243 node = bin(id)
1244 self.rev(node)
1245 return node
1246 except (TypeError, error.LookupError):
1247 pass
1248
1249 def _partialmatch(self, id):
1250 # we don't care about wdirfilenodeids as they should always be full hashes
1251 maybewdir = wdirhex.startswith(id)
1252 try:
1253 partial = self.index.partialmatch(id)
1254 if partial and self.hasnode(partial):
1255 if maybewdir:
1256 # single 'ff...' match in radix tree, ambiguous with wdir
1257 raise error.RevlogError
1258 return partial
1259 if maybewdir:
1260 # no 'ff...' match in radix tree, wdir identified
1261 raise error.WdirUnsupported
1262 return None
1263 except error.RevlogError:
1264 # parsers.c radix tree lookup gave multiple matches
1265 # fast path: for unfiltered changelog, radix tree is accurate
1266 if not getattr(self, 'filteredrevs', None):
1267 raise error.AmbiguousPrefixLookupError(
1268 id, self.indexfile, _('ambiguous identifier'))
1269 # fall through to slow path that filters hidden revisions
1270 except (AttributeError, ValueError):
1271 # we are pure python, or key was too short to search radix tree
1272 pass
1273
1274 if id in self._pcache:
1275 return self._pcache[id]
1276
1277 if len(id) <= 40:
1278 try:
1279 # hex(node)[:...]
1280 l = len(id) // 2 # grab an even number of digits
1281 prefix = bin(id[:l * 2])
1282 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1283 nl = [n for n in nl if hex(n).startswith(id) and
1284 self.hasnode(n)]
1285 if nullhex.startswith(id):
1286 nl.append(nullid)
1287 if len(nl) > 0:
1288 if len(nl) == 1 and not maybewdir:
1289 self._pcache[id] = nl[0]
1290 return nl[0]
1291 raise error.AmbiguousPrefixLookupError(
1292 id, self.indexfile, _('ambiguous identifier'))
1293 if maybewdir:
1294 raise error.WdirUnsupported
1295 return None
1296 except TypeError:
1297 pass
1298
1299 def lookup(self, id):
1299 def lookup(self, id):
1300 """locate a node based on:
1300 """locate a node based on:
1301 - revision number or str(revision number)
1301 - revision number or str(revision number)
1302 - nodeid or subset of hex nodeid
1302 - nodeid or subset of hex nodeid
1303 """
1303 """
1304 n = self._match(id)
1304 n = self._match(id)
1305 if n is not None:
1305 if n is not None:
1306 return n
1306 return n
1307 n = self._partialmatch(id)
1307 n = self._partialmatch(id)
1308 if n:
1308 if n:
1309 return n
1309 return n
1310
1310
1311 raise error.LookupError(id, self.indexfile, _('no match found'))
1311 raise error.LookupError(id, self.indexfile, _('no match found'))
1312
1312
1313 def shortest(self, node, minlength=1):
1313 def shortest(self, node, minlength=1):
1314 """Find the shortest unambiguous prefix that matches node."""
1314 """Find the shortest unambiguous prefix that matches node."""
1315 def isvalid(prefix):
1315 def isvalid(prefix):
1316 try:
1316 try:
1317 matchednode = self._partialmatch(prefix)
1317 matchednode = self._partialmatch(prefix)
1318 except error.AmbiguousPrefixLookupError:
1318 except error.AmbiguousPrefixLookupError:
1319 return False
1319 return False
1320 except error.WdirUnsupported:
1320 except error.WdirUnsupported:
1321 # single 'ff...' match
1321 # single 'ff...' match
1322 return True
1322 return True
1323 if matchednode is None:
1323 if matchednode is None:
1324 raise error.LookupError(node, self.indexfile, _('no node'))
1324 raise error.LookupError(node, self.indexfile, _('no node'))
1325 return True
1325 return True
1326
1326
1327 def maybewdir(prefix):
1327 def maybewdir(prefix):
1328 return all(c == 'f' for c in pycompat.iterbytestr(prefix))
1328 return all(c == 'f' for c in pycompat.iterbytestr(prefix))
1329
1329
1330 hexnode = hex(node)
1330 hexnode = hex(node)
1331
1331
1332 def disambiguate(hexnode, minlength):
1332 def disambiguate(hexnode, minlength):
1333 """Disambiguate against wdirid."""
1333 """Disambiguate against wdirid."""
1334 for length in range(minlength, 41):
1334 for length in range(minlength, 41):
1335 prefix = hexnode[:length]
1335 prefix = hexnode[:length]
1336 if not maybewdir(prefix):
1336 if not maybewdir(prefix):
1337 return prefix
1337 return prefix
1338
1338
1339 if not getattr(self, 'filteredrevs', None):
1339 if not getattr(self, 'filteredrevs', None):
1340 try:
1340 try:
1341 length = max(self.index.shortest(node), minlength)
1341 length = max(self.index.shortest(node), minlength)
1342 return disambiguate(hexnode, length)
1342 return disambiguate(hexnode, length)
1343 except error.RevlogError:
1343 except error.RevlogError:
1344 if node != wdirid:
1344 if node != wdirid:
1345 raise error.LookupError(node, self.indexfile, _('no node'))
1345 raise error.LookupError(node, self.indexfile, _('no node'))
1346 except AttributeError:
1346 except AttributeError:
1347 # Fall through to pure code
1347 # Fall through to pure code
1348 pass
1348 pass
1349
1349
1350 if node == wdirid:
1350 if node == wdirid:
1351 for length in range(minlength, 41):
1351 for length in range(minlength, 41):
1352 prefix = hexnode[:length]
1352 prefix = hexnode[:length]
1353 if isvalid(prefix):
1353 if isvalid(prefix):
1354 return prefix
1354 return prefix
1355
1355
1356 for length in range(minlength, 41):
1356 for length in range(minlength, 41):
1357 prefix = hexnode[:length]
1357 prefix = hexnode[:length]
1358 if isvalid(prefix):
1358 if isvalid(prefix):
1359 return disambiguate(hexnode, length)
1359 return disambiguate(hexnode, length)
1360
1360
1361 def cmp(self, node, text):
1361 def cmp(self, node, text):
1362 """compare text with a given file revision
1362 """compare text with a given file revision
1363
1363
1364 returns True if text is different than what is stored.
1364 returns True if text is different than what is stored.
1365 """
1365 """
1366 p1, p2 = self.parents(node)
1366 p1, p2 = self.parents(node)
1367 return storageutil.hashrevisionsha1(text, p1, p2) != node
1367 return storageutil.hashrevisionsha1(text, p1, p2) != node
1368
1368
1369 def _cachesegment(self, offset, data):
1369 def _cachesegment(self, offset, data):
1370 """Add a segment to the revlog cache.
1370 """Add a segment to the revlog cache.
1371
1371
1372 Accepts an absolute offset and the data that is at that location.
1372 Accepts an absolute offset and the data that is at that location.
1373 """
1373 """
1374 o, d = self._chunkcache
1374 o, d = self._chunkcache
1375 # try to add to existing cache
1375 # try to add to existing cache
1376 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1376 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1377 self._chunkcache = o, d + data
1377 self._chunkcache = o, d + data
1378 else:
1378 else:
1379 self._chunkcache = offset, data
1379 self._chunkcache = offset, data
1380
1380
1381 def _readsegment(self, offset, length, df=None):
1381 def _readsegment(self, offset, length, df=None):
1382 """Load a segment of raw data from the revlog.
1382 """Load a segment of raw data from the revlog.
1383
1383
1384 Accepts an absolute offset, length to read, and an optional existing
1384 Accepts an absolute offset, length to read, and an optional existing
1385 file handle to read from.
1385 file handle to read from.
1386
1386
1387 If an existing file handle is passed, it will be seeked and the
1387 If an existing file handle is passed, it will be seeked and the
1388 original seek position will NOT be restored.
1388 original seek position will NOT be restored.
1389
1389
1390 Returns a str or buffer of raw byte data.
1390 Returns a str or buffer of raw byte data.
1391
1391
1392 Raises if the requested number of bytes could not be read.
1392 Raises if the requested number of bytes could not be read.
1393 """
1393 """
1394 # Cache data both forward and backward around the requested
1394 # Cache data both forward and backward around the requested
1395 # data, in a fixed size window. This helps speed up operations
1395 # data, in a fixed size window. This helps speed up operations
1396 # involving reading the revlog backwards.
1396 # involving reading the revlog backwards.
1397 cachesize = self._chunkcachesize
1397 cachesize = self._chunkcachesize
1398 realoffset = offset & ~(cachesize - 1)
1398 realoffset = offset & ~(cachesize - 1)
1399 reallength = (((offset + length + cachesize) & ~(cachesize - 1))
1399 reallength = (((offset + length + cachesize) & ~(cachesize - 1))
1400 - realoffset)
1400 - realoffset)
1401 with self._datareadfp(df) as df:
1401 with self._datareadfp(df) as df:
1402 df.seek(realoffset)
1402 df.seek(realoffset)
1403 d = df.read(reallength)
1403 d = df.read(reallength)
1404
1404
1405 self._cachesegment(realoffset, d)
1405 self._cachesegment(realoffset, d)
1406 if offset != realoffset or reallength != length:
1406 if offset != realoffset or reallength != length:
1407 startoffset = offset - realoffset
1407 startoffset = offset - realoffset
1408 if len(d) - startoffset < length:
1408 if len(d) - startoffset < length:
1409 raise error.RevlogError(
1409 raise error.RevlogError(
1410 _('partial read of revlog %s; expected %d bytes from '
1410 _('partial read of revlog %s; expected %d bytes from '
1411 'offset %d, got %d') %
1411 'offset %d, got %d') %
1412 (self.indexfile if self._inline else self.datafile,
1412 (self.indexfile if self._inline else self.datafile,
1413 length, realoffset, len(d) - startoffset))
1413 length, realoffset, len(d) - startoffset))
1414
1414
1415 return util.buffer(d, startoffset, length)
1415 return util.buffer(d, startoffset, length)
1416
1416
1417 if len(d) < length:
1417 if len(d) < length:
1418 raise error.RevlogError(
1418 raise error.RevlogError(
1419 _('partial read of revlog %s; expected %d bytes from offset '
1419 _('partial read of revlog %s; expected %d bytes from offset '
1420 '%d, got %d') %
1420 '%d, got %d') %
1421 (self.indexfile if self._inline else self.datafile,
1421 (self.indexfile if self._inline else self.datafile,
1422 length, offset, len(d)))
1422 length, offset, len(d)))
1423
1423
1424 return d
1424 return d
1425
1425
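# --- Editor's illustrative sketch (not part of this changeset): the
# cache-window arithmetic used by _readsegment() above. With a power-of-two
# cache size, masking with ~(cachesize - 1) rounds the requested range out
# to aligned boundaries so neighbouring reads hit the same cached segment.
# The numbers are hypothetical.
cachesize = 65536                      # hypothetical power-of-two cache size
offset, length = 70000, 100            # hypothetical request
realoffset = offset & ~(cachesize - 1)
reallength = (((offset + length + cachesize) & ~(cachesize - 1)) - realoffset)
assert realoffset == 65536
assert reallength == 65536
assert realoffset <= offset and offset + length <= realoffset + reallength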
1426 def _getsegment(self, offset, length, df=None):
1426 def _getsegment(self, offset, length, df=None):
1427 """Obtain a segment of raw data from the revlog.
1427 """Obtain a segment of raw data from the revlog.
1428
1428
1429 Accepts an absolute offset, length of bytes to obtain, and an
1429 Accepts an absolute offset, length of bytes to obtain, and an
1430 optional file handle to the already-opened revlog. If the file
1430 optional file handle to the already-opened revlog. If the file
1431 handle is used, its original seek position will not be preserved.
1431 handle is used, its original seek position will not be preserved.
1432
1432
1433 Requests for data may be returned from a cache.
1433 Requests for data may be returned from a cache.
1434
1434
1435 Returns a str or a buffer instance of raw byte data.
1435 Returns a str or a buffer instance of raw byte data.
1436 """
1436 """
1437 o, d = self._chunkcache
1437 o, d = self._chunkcache
1438 l = len(d)
1438 l = len(d)
1439
1439
1440 # is it in the cache?
1440 # is it in the cache?
1441 cachestart = offset - o
1441 cachestart = offset - o
1442 cacheend = cachestart + length
1442 cacheend = cachestart + length
1443 if cachestart >= 0 and cacheend <= l:
1443 if cachestart >= 0 and cacheend <= l:
1444 if cachestart == 0 and cacheend == l:
1444 if cachestart == 0 and cacheend == l:
1445 return d # avoid a copy
1445 return d # avoid a copy
1446 return util.buffer(d, cachestart, cacheend - cachestart)
1446 return util.buffer(d, cachestart, cacheend - cachestart)
1447
1447
1448 return self._readsegment(offset, length, df=df)
1448 return self._readsegment(offset, length, df=df)
1449
1449
1450 def _getsegmentforrevs(self, startrev, endrev, df=None):
1450 def _getsegmentforrevs(self, startrev, endrev, df=None):
1451 """Obtain a segment of raw data corresponding to a range of revisions.
1451 """Obtain a segment of raw data corresponding to a range of revisions.
1452
1452
1453 Accepts the start and end revisions and an optional already-open
1453 Accepts the start and end revisions and an optional already-open
1454 file handle to be used for reading. If the file handle is read, its
1454 file handle to be used for reading. If the file handle is read, its
1455 seek position will not be preserved.
1455 seek position will not be preserved.
1456
1456
1457 Requests for data may be satisfied by a cache.
1457 Requests for data may be satisfied by a cache.
1458
1458
1459 Returns a 2-tuple of (offset, data) for the requested range of
1459 Returns a 2-tuple of (offset, data) for the requested range of
1460 revisions. Offset is the integer offset from the beginning of the
1460 revisions. Offset is the integer offset from the beginning of the
1461 revlog and data is a str or buffer of the raw byte data.
1461 revlog and data is a str or buffer of the raw byte data.
1462
1462
1463 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1463 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1464 to determine where each revision's data begins and ends.
1464 to determine where each revision's data begins and ends.
1465 """
1465 """
1466 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1466 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1467 # (functions are expensive).
1467 # (functions are expensive).
1468 index = self.index
1468 index = self.index
1469 istart = index[startrev]
1469 istart = index[startrev]
1470 start = int(istart[0] >> 16)
1470 start = int(istart[0] >> 16)
1471 if startrev == endrev:
1471 if startrev == endrev:
1472 end = start + istart[1]
1472 end = start + istart[1]
1473 else:
1473 else:
1474 iend = index[endrev]
1474 iend = index[endrev]
1475 end = int(iend[0] >> 16) + iend[1]
1475 end = int(iend[0] >> 16) + iend[1]
1476
1476
1477 if self._inline:
1477 if self._inline:
1478 start += (startrev + 1) * self._io.size
1478 start += (startrev + 1) * self._io.size
1479 end += (endrev + 1) * self._io.size
1479 end += (endrev + 1) * self._io.size
1480 length = end - start
1480 length = end - start
1481
1481
1482 return start, self._getsegment(start, length, df=df)
1482 return start, self._getsegment(start, length, df=df)
1483
1483
1484 def _chunk(self, rev, df=None):
1484 def _chunk(self, rev, df=None):
1485 """Obtain a single decompressed chunk for a revision.
1485 """Obtain a single decompressed chunk for a revision.
1486
1486
1487 Accepts an integer revision and an optional already-open file handle
1487 Accepts an integer revision and an optional already-open file handle
1488 to be used for reading. If used, the seek position of the file will not
1488 to be used for reading. If used, the seek position of the file will not
1489 be preserved.
1489 be preserved.
1490
1490
1491 Returns a str holding uncompressed data for the requested revision.
1491 Returns a str holding uncompressed data for the requested revision.
1492 """
1492 """
1493 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1493 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1494
1494
1495 def _chunks(self, revs, df=None, targetsize=None):
1495 def _chunks(self, revs, df=None, targetsize=None):
1496 """Obtain decompressed chunks for the specified revisions.
1496 """Obtain decompressed chunks for the specified revisions.
1497
1497
1498 Accepts an iterable of numeric revisions that are assumed to be in
1498 Accepts an iterable of numeric revisions that are assumed to be in
1499 ascending order. Also accepts an optional already-open file handle
1499 ascending order. Also accepts an optional already-open file handle
1500 to be used for reading. If used, the seek position of the file will
1500 to be used for reading. If used, the seek position of the file will
1501 not be preserved.
1501 not be preserved.
1502
1502
1503 This function is similar to calling ``self._chunk()`` multiple times,
1503 This function is similar to calling ``self._chunk()`` multiple times,
1504 but is faster.
1504 but is faster.
1505
1505
1506 Returns a list with decompressed data for each requested revision.
1506 Returns a list with decompressed data for each requested revision.
1507 """
1507 """
1508 if not revs:
1508 if not revs:
1509 return []
1509 return []
1510 start = self.start
1510 start = self.start
1511 length = self.length
1511 length = self.length
1512 inline = self._inline
1512 inline = self._inline
1513 iosize = self._io.size
1513 iosize = self._io.size
1514 buffer = util.buffer
1514 buffer = util.buffer
1515
1515
1516 l = []
1516 l = []
1517 ladd = l.append
1517 ladd = l.append
1518
1518
1519 if not self._withsparseread:
1519 if not self._withsparseread:
1520 slicedchunks = (revs,)
1520 slicedchunks = (revs,)
1521 else:
1521 else:
1522 slicedchunks = deltautil.slicechunk(self, revs,
1522 slicedchunks = deltautil.slicechunk(self, revs,
1523 targetsize=targetsize)
1523 targetsize=targetsize)
1524
1524
1525 for revschunk in slicedchunks:
1525 for revschunk in slicedchunks:
1526 firstrev = revschunk[0]
1526 firstrev = revschunk[0]
1527 # Skip trailing revisions with empty diff
1527 # Skip trailing revisions with empty diff
1528 for lastrev in revschunk[::-1]:
1528 for lastrev in revschunk[::-1]:
1529 if length(lastrev) != 0:
1529 if length(lastrev) != 0:
1530 break
1530 break
1531
1531
1532 try:
1532 try:
1533 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1533 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1534 except OverflowError:
1534 except OverflowError:
1535 # issue4215 - we can't cache a run of chunks greater than
1535 # issue4215 - we can't cache a run of chunks greater than
1536 # 2G on Windows
1536 # 2G on Windows
1537 return [self._chunk(rev, df=df) for rev in revschunk]
1537 return [self._chunk(rev, df=df) for rev in revschunk]
1538
1538
1539 decomp = self.decompress
1539 decomp = self.decompress
1540 for rev in revschunk:
1540 for rev in revschunk:
1541 chunkstart = start(rev)
1541 chunkstart = start(rev)
1542 if inline:
1542 if inline:
1543 chunkstart += (rev + 1) * iosize
1543 chunkstart += (rev + 1) * iosize
1544 chunklength = length(rev)
1544 chunklength = length(rev)
1545 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1545 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1546
1546
1547 return l
1547 return l
1548
1548
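# --- Editor's illustrative sketch (not part of this changeset): how
# _chunks() above carves per-revision chunks out of one contiguous segment
# read. `data` stands in for the bytes returned by _getsegmentforrevs(),
# and `offsets`/`lengths` for the per-revision start()/length() values; all
# values below are made up for the example.
data = b'AAAABBBCC'           # one read covering three consecutive revisions
offset = 1000                 # absolute start of that read in the data file
offsets = {0: 1000, 1: 1004, 2: 1007}
lengths = {0: 4, 1: 3, 2: 2}
chunks = [data[offsets[r] - offset:offsets[r] - offset + lengths[r]]
          for r in (0, 1, 2)]
assert chunks == [b'AAAA', b'BBB', b'CC']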
1549 def _chunkclear(self):
1549 def _chunkclear(self):
1550 """Clear the raw chunk cache."""
1550 """Clear the raw chunk cache."""
1551 self._chunkcache = (0, '')
1551 self._chunkcache = (0, '')
1552
1552
1553 def deltaparent(self, rev):
1553 def deltaparent(self, rev):
1554 """return deltaparent of the given revision"""
1554 """return deltaparent of the given revision"""
1555 base = self.index[rev][3]
1555 base = self.index[rev][3]
1556 if base == rev:
1556 if base == rev:
1557 return nullrev
1557 return nullrev
1558 elif self._generaldelta:
1558 elif self._generaldelta:
1559 return base
1559 return base
1560 else:
1560 else:
1561 return rev - 1
1561 return rev - 1
1562
1562
1563 def issnapshot(self, rev):
1563 def issnapshot(self, rev):
1564 """tells whether rev is a snapshot
1564 """tells whether rev is a snapshot
1565 """
1565 """
1566 if not self._sparserevlog:
1566 if not self._sparserevlog:
1567 return self.deltaparent(rev) == nullrev
1567 return self.deltaparent(rev) == nullrev
1568 elif util.safehasattr(self.index, 'issnapshot'):
1568 elif util.safehasattr(self.index, 'issnapshot'):
1569 # directly assign the method to cache the testing and access
1569 # directly assign the method to cache the testing and access
1570 self.issnapshot = self.index.issnapshot
1570 self.issnapshot = self.index.issnapshot
1571 return self.issnapshot(rev)
1571 return self.issnapshot(rev)
1572 if rev == nullrev:
1572 if rev == nullrev:
1573 return True
1573 return True
1574 entry = self.index[rev]
1574 entry = self.index[rev]
1575 base = entry[3]
1575 base = entry[3]
1576 if base == rev:
1576 if base == rev:
1577 return True
1577 return True
1578 if base == nullrev:
1578 if base == nullrev:
1579 return True
1579 return True
1580 p1 = entry[5]
1580 p1 = entry[5]
1581 p2 = entry[6]
1581 p2 = entry[6]
1582 if base == p1 or base == p2:
1582 if base == p1 or base == p2:
1583 return False
1583 return False
1584 return self.issnapshot(base)
1584 return self.issnapshot(base)
1585
1585
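# --- Editor's illustrative sketch (not part of this changeset): the
# sparse-revlog snapshot test above, run over a toy index. Each entry is
# (deltabase, p1rev, p2rev), mirroring index fields 3, 5 and 6; -1 plays
# the role of nullrev.
nullrev = -1
toyindex = {
    0: (0, nullrev, nullrev),   # full snapshot (base == rev)
    1: (0, 0, nullrev),         # delta against its own parent: not a snapshot
    2: (0, 1, nullrev),         # delta against rev 0, which is not a parent
}
def toy_issnapshot(rev):
    if rev == nullrev:
        return True
    base, p1, p2 = toyindex[rev]
    if base == rev or base == nullrev:
        return True
    if base in (p1, p2):
        return False
    return toy_issnapshot(base)

assert toy_issnapshot(0) and not toy_issnapshot(1)
assert toy_issnapshot(2)    # intermediate snapshot: its base is a snapshot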
1586 def snapshotdepth(self, rev):
1586 def snapshotdepth(self, rev):
1587 """number of snapshot in the chain before this one"""
1587 """number of snapshot in the chain before this one"""
1588 if not self.issnapshot(rev):
1588 if not self.issnapshot(rev):
1589 raise error.ProgrammingError('revision %d not a snapshot' % rev)
1589 raise error.ProgrammingError('revision %d not a snapshot' % rev)
1590 return len(self._deltachain(rev)[0]) - 1
1590 return len(self._deltachain(rev)[0]) - 1
1591
1591
1592 def revdiff(self, rev1, rev2):
1592 def revdiff(self, rev1, rev2):
1593 """return or calculate a delta between two revisions
1593 """return or calculate a delta between two revisions
1594
1594
1595 The delta calculated is in binary form and is intended to be written to
1595 The delta calculated is in binary form and is intended to be written to
1596 revlog data directly. So this function needs raw revision data.
1596 revlog data directly. So this function needs raw revision data.
1597 """
1597 """
1598 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1598 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1599 return bytes(self._chunk(rev2))
1599 return bytes(self._chunk(rev2))
1600
1600
1601 return mdiff.textdiff(self.rawdata(rev1),
1601 return mdiff.textdiff(self.rawdata(rev1),
1602 self.rawdata(rev2))
1602 self.rawdata(rev2))
1603
1603
1604 def _processflags(self, text, flags, operation, raw=False):
1605 """deprecated entry point to access flag processors"""
1606 msg = ('_processflags(...) use the specialized variant')
1607 util.nouideprecwarn(msg, '5.2', stacklevel=2)
1608 if raw:
1609 return text, flagutil.processflagsraw(self, text, flags)
1610 elif operation == 'read':
1611 return flagutil.processflagsread(self, text, flags)
1612 else: # write operation
1613 return flagutil.processflagswrite(self, text, flags)
1614
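# --- Editor's note (not part of this changeset): the deprecated shim above
# only forwards to the flagutil helpers; callers are expected to switch to
# the specialized variants directly. `rl`, `text` and `flags` below are
# placeholders for a revlog instance and its arguments.
#
#   rl._processflags(text, flags, 'read')   -> flagutil.processflagsread(rl, text, flags)
#   rl._processflags(text, flags, 'write')  -> flagutil.processflagswrite(rl, text, flags)
#   rl._processflags(text, flags, op, raw=True)
#                                            -> (text, flagutil.processflagsraw(rl, text, flags))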
1604 def revision(self, nodeorrev, _df=None, raw=False):
1615 def revision(self, nodeorrev, _df=None, raw=False):
1605 """return an uncompressed revision of a given node or revision
1616 """return an uncompressed revision of a given node or revision
1606 number.
1617 number.
1607
1618
1608 _df - an existing file handle to read from. (internal-only)
1619 _df - an existing file handle to read from. (internal-only)
1609 raw - an optional argument specifying if the revision data is to be
1620 raw - an optional argument specifying if the revision data is to be
1610 treated as raw data when applying flag transforms. 'raw' should be set
1621 treated as raw data when applying flag transforms. 'raw' should be set
1611 to True when generating changegroups or in debug commands.
1622 to True when generating changegroups or in debug commands.
1612 """
1623 """
1613 if raw:
1624 if raw:
1614 msg = ('revlog.revision(..., raw=True) is deprecated, '
1625 msg = ('revlog.revision(..., raw=True) is deprecated, '
1615 'use revlog.rawdata(...)')
1626 'use revlog.rawdata(...)')
1616 util.nouideprecwarn(msg, '5.2', stacklevel=2)
1627 util.nouideprecwarn(msg, '5.2', stacklevel=2)
1617 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1628 return self._revisiondata(nodeorrev, _df, raw=raw)[0]
1618
1629
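# --- Editor's note (not part of this changeset): the migration the
# deprecation warning above asks for. `rl` and `node` are placeholders for
# a revlog instance and a known node.
#
#   old, deprecated spelling (triggers the warning above):
#       rawtext = rl.revision(node, raw=True)
#   preferred spelling:
#       rawtext = rl.rawdata(node)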
1619 def sidedata(self, nodeorrev, _df=None):
1630 def sidedata(self, nodeorrev, _df=None):
1620 """a map of extra data related to the changeset but not part of the hash
1631 """a map of extra data related to the changeset but not part of the hash
1621
1632
1622 This function currently returns a dictionary. However, a more advanced
1633 This function currently returns a dictionary. However, a more advanced
1623 mapping object will likely be used in the future for more
1634 mapping object will likely be used in the future for more
1624 efficient/lazy code.
1635 efficient/lazy code.
1625 """
1636 """
1626 return self._revisiondata(nodeorrev, _df)[1]
1637 return self._revisiondata(nodeorrev, _df)[1]
1627
1638
1628 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1639 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1629 # deal with <nodeorrev> argument type
1640 # deal with <nodeorrev> argument type
1630 if isinstance(nodeorrev, int):
1641 if isinstance(nodeorrev, int):
1631 rev = nodeorrev
1642 rev = nodeorrev
1632 node = self.node(rev)
1643 node = self.node(rev)
1633 else:
1644 else:
1634 node = nodeorrev
1645 node = nodeorrev
1635 rev = None
1646 rev = None
1636
1647
1637 # fast path the special `nullid` rev
1648 # fast path the special `nullid` rev
1638 if node == nullid:
1649 if node == nullid:
1639 return "", {}
1650 return "", {}
1640
1651
1641 # The text as stored inside the revlog. Might be the revision or might
1652 # The text as stored inside the revlog. Might be the revision or might
1642 # need to be processed to retrieve the revision.
1653 # need to be processed to retrieve the revision.
1643 rawtext = None
1654 rawtext = None
1644
1655
1645 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1656 rev, rawtext, validated = self._rawtext(node, rev, _df=_df)
1646
1657
1647 if raw and validated:
1658 if raw and validated:
1648 # if we don't want to process the raw text and that raw
1659 # if we don't want to process the raw text and that raw
1649 # text is cached, we can exit early.
1660 # text is cached, we can exit early.
1650 return rawtext, {}
1661 return rawtext, {}
1651 if rev is None:
1662 if rev is None:
1652 rev = self.rev(node)
1663 rev = self.rev(node)
1653 # the revlog's flag for this revision
1664 # the revlog's flag for this revision
1654 # (usually alter its state or content)
1665 # (usually alter its state or content)
1655 flags = self.flags(rev)
1666 flags = self.flags(rev)
1656
1667
1657 if validated and flags == REVIDX_DEFAULT_FLAGS:
1668 if validated and flags == REVIDX_DEFAULT_FLAGS:
1658 # no extra flags set, no flag processor runs, text = rawtext
1669 # no extra flags set, no flag processor runs, text = rawtext
1659 return rawtext, {}
1670 return rawtext, {}
1660
1671
1661 sidedata = {}
1672 sidedata = {}
1662 if raw:
1673 if raw:
1663 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1674 validatehash = flagutil.processflagsraw(self, rawtext, flags)
1664 text = rawtext
1675 text = rawtext
1665 else:
1676 else:
1666 r = flagutil.processflagsread(self, rawtext, flags)
1677 r = flagutil.processflagsread(self, rawtext, flags)
1667 text, validatehash, sidedata = r
1678 text, validatehash, sidedata = r
1668 if validatehash:
1679 if validatehash:
1669 self.checkhash(text, node, rev=rev)
1680 self.checkhash(text, node, rev=rev)
1670 if not validated:
1681 if not validated:
1671 self._revisioncache = (node, rev, rawtext)
1682 self._revisioncache = (node, rev, rawtext)
1672
1683
1673 return text, sidedata
1684 return text, sidedata
1674
1685
1675 def _rawtext(self, node, rev, _df=None):
1686 def _rawtext(self, node, rev, _df=None):
1676 """return the possibly unvalidated rawtext for a revision
1687 """return the possibly unvalidated rawtext for a revision
1677
1688
1678 returns (rev, rawtext, validated)
1689 returns (rev, rawtext, validated)
1679 """
1690 """
1680
1691
1681 # revision in the cache (could be useful to apply delta)
1692 # revision in the cache (could be useful to apply delta)
1682 cachedrev = None
1693 cachedrev = None
1683 # An intermediate text to apply deltas to
1694 # An intermediate text to apply deltas to
1684 basetext = None
1695 basetext = None
1685
1696
1686 # Check if we have the entry in cache
1697 # Check if we have the entry in cache
1687 # The cache entry looks like (node, rev, rawtext)
1698 # The cache entry looks like (node, rev, rawtext)
1688 if self._revisioncache:
1699 if self._revisioncache:
1689 if self._revisioncache[0] == node:
1700 if self._revisioncache[0] == node:
1690 return (rev, self._revisioncache[2], True)
1701 return (rev, self._revisioncache[2], True)
1691 cachedrev = self._revisioncache[1]
1702 cachedrev = self._revisioncache[1]
1692
1703
1693 if rev is None:
1704 if rev is None:
1694 rev = self.rev(node)
1705 rev = self.rev(node)
1695
1706
1696 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1707 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1697 if stopped:
1708 if stopped:
1698 basetext = self._revisioncache[2]
1709 basetext = self._revisioncache[2]
1699
1710
1700 # drop cache to save memory, the caller is expected to
1711 # drop cache to save memory, the caller is expected to
1701 # update self._revisioncache after validating the text
1712 # update self._revisioncache after validating the text
1702 self._revisioncache = None
1713 self._revisioncache = None
1703
1714
1704 targetsize = None
1715 targetsize = None
1705 rawsize = self.index[rev][2]
1716 rawsize = self.index[rev][2]
1706 if 0 <= rawsize:
1717 if 0 <= rawsize:
1707 targetsize = 4 * rawsize
1718 targetsize = 4 * rawsize
1708
1719
1709 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1720 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1710 if basetext is None:
1721 if basetext is None:
1711 basetext = bytes(bins[0])
1722 basetext = bytes(bins[0])
1712 bins = bins[1:]
1723 bins = bins[1:]
1713
1724
1714 rawtext = mdiff.patches(basetext, bins)
1725 rawtext = mdiff.patches(basetext, bins)
1715 del basetext # let us have a chance to free memory early
1726 del basetext # let us have a chance to free memory early
1716 return (rev, rawtext, False)
1727 return (rev, rawtext, False)
1717
1728
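# --- Editor's illustrative sketch (not part of this changeset): the shape
# of the reconstruction done by _rawtext() above -- take the first chunk of
# the delta chain as the base text, then fold the remaining deltas on top in
# chain order. The toy "delta" here is just a (start, end, replacement)
# triple, not Mercurial's binary bdiff format.
def toy_patch(text, delta):
    start, end, replacement = delta
    return text[:start] + replacement + text[end:]

basetext = b'the quick brown fox'
deltas = [(4, 9, b'slow'), (0, 3, b'a')]       # applied in chain order
rawtext = basetext
for d in deltas:
    rawtext = toy_patch(rawtext, d)
assert rawtext == b'a slow brown fox'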
1718 def rawdata(self, nodeorrev, _df=None):
1729 def rawdata(self, nodeorrev, _df=None):
1719 """return an uncompressed raw data of a given node or revision number.
1730 """return an uncompressed raw data of a given node or revision number.
1720
1731
1721 _df - an existing file handle to read from. (internal-only)
1732 _df - an existing file handle to read from. (internal-only)
1722 """
1733 """
1723 return self._revisiondata(nodeorrev, _df, raw=True)[0]
1734 return self._revisiondata(nodeorrev, _df, raw=True)[0]
1724
1735
1725 def hash(self, text, p1, p2):
1736 def hash(self, text, p1, p2):
1726 """Compute a node hash.
1737 """Compute a node hash.
1727
1738
1728 Available as a function so that subclasses can replace the hash
1739 Available as a function so that subclasses can replace the hash
1729 as needed.
1740 as needed.
1730 """
1741 """
1731 return storageutil.hashrevisionsha1(text, p1, p2)
1742 return storageutil.hashrevisionsha1(text, p1, p2)
1732
1743
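# --- Editor's illustrative sketch (not part of this changeset): what
# storageutil.hashrevisionsha1(), used by hash() and checkhash() above,
# computes -- SHA-1 over the two parent nodes in sorted order followed by
# the revision text.
import hashlib

def toy_hashrevision(text, p1, p2):
    a, b = sorted([p1, p2])
    s = hashlib.sha1(a)
    s.update(b)
    s.update(text)
    return s.digest()

nullid = b'\0' * 20
node = toy_hashrevision(b'file content\n', nullid, nullid)
assert len(node) == 20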
1733 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1744 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1734 """Check node hash integrity.
1745 """Check node hash integrity.
1735
1746
1736 Available as a function so that subclasses can extend hash mismatch
1747 Available as a function so that subclasses can extend hash mismatch
1737 behaviors as needed.
1748 behaviors as needed.
1738 """
1749 """
1739 try:
1750 try:
1740 if p1 is None and p2 is None:
1751 if p1 is None and p2 is None:
1741 p1, p2 = self.parents(node)
1752 p1, p2 = self.parents(node)
1742 if node != self.hash(text, p1, p2):
1753 if node != self.hash(text, p1, p2):
1743 # Clear the revision cache on hash failure. The revision cache
1754 # Clear the revision cache on hash failure. The revision cache
1744 # only stores the raw revision and clearing the cache does have
1755 # only stores the raw revision and clearing the cache does have
1745 # the side-effect that we won't have a cache hit when the raw
1756 # the side-effect that we won't have a cache hit when the raw
1746 # revision data is accessed. But this case should be rare and
1757 # revision data is accessed. But this case should be rare and
1747 # it is extra work to teach the cache about the hash
1758 # it is extra work to teach the cache about the hash
1748 # verification state.
1759 # verification state.
1749 if self._revisioncache and self._revisioncache[0] == node:
1760 if self._revisioncache and self._revisioncache[0] == node:
1750 self._revisioncache = None
1761 self._revisioncache = None
1751
1762
1752 revornode = rev
1763 revornode = rev
1753 if revornode is None:
1764 if revornode is None:
1754 revornode = templatefilters.short(hex(node))
1765 revornode = templatefilters.short(hex(node))
1755 raise error.RevlogError(_("integrity check failed on %s:%s")
1766 raise error.RevlogError(_("integrity check failed on %s:%s")
1756 % (self.indexfile, pycompat.bytestr(revornode)))
1767 % (self.indexfile, pycompat.bytestr(revornode)))
1757 except error.RevlogError:
1768 except error.RevlogError:
1758 if self._censorable and storageutil.iscensoredtext(text):
1769 if self._censorable and storageutil.iscensoredtext(text):
1759 raise error.CensoredNodeError(self.indexfile, node, text)
1770 raise error.CensoredNodeError(self.indexfile, node, text)
1760 raise
1771 raise
1761
1772
1762 def _enforceinlinesize(self, tr, fp=None):
1773 def _enforceinlinesize(self, tr, fp=None):
1763 """Check if the revlog is too big for inline and convert if so.
1774 """Check if the revlog is too big for inline and convert if so.
1764
1775
1765 This should be called after revisions are added to the revlog. If the
1776 This should be called after revisions are added to the revlog. If the
1766 revlog has grown too large to be an inline revlog, it will convert it
1777 revlog has grown too large to be an inline revlog, it will convert it
1767 to use multiple index and data files.
1778 to use multiple index and data files.
1768 """
1779 """
1769 tiprev = len(self) - 1
1780 tiprev = len(self) - 1
1770 if (not self._inline or
1781 if (not self._inline or
1771 (self.start(tiprev) + self.length(tiprev)) < _maxinline):
1782 (self.start(tiprev) + self.length(tiprev)) < _maxinline):
1772 return
1783 return
1773
1784
1774 trinfo = tr.find(self.indexfile)
1785 trinfo = tr.find(self.indexfile)
1775 if trinfo is None:
1786 if trinfo is None:
1776 raise error.RevlogError(_("%s not found in the transaction")
1787 raise error.RevlogError(_("%s not found in the transaction")
1777 % self.indexfile)
1788 % self.indexfile)
1778
1789
1779 trindex = trinfo[2]
1790 trindex = trinfo[2]
1780 if trindex is not None:
1791 if trindex is not None:
1781 dataoff = self.start(trindex)
1792 dataoff = self.start(trindex)
1782 else:
1793 else:
1783 # revlog was stripped at start of transaction, use all leftover data
1794 # revlog was stripped at start of transaction, use all leftover data
1784 trindex = len(self) - 1
1795 trindex = len(self) - 1
1785 dataoff = self.end(tiprev)
1796 dataoff = self.end(tiprev)
1786
1797
1787 tr.add(self.datafile, dataoff)
1798 tr.add(self.datafile, dataoff)
1788
1799
1789 if fp:
1800 if fp:
1790 fp.flush()
1801 fp.flush()
1791 fp.close()
1802 fp.close()
1792 # We can't use the cached file handle after close(). So prevent
1803 # We can't use the cached file handle after close(). So prevent
1793 # its usage.
1804 # its usage.
1794 self._writinghandles = None
1805 self._writinghandles = None
1795
1806
1796 with self._indexfp('r') as ifh, self._datafp('w') as dfh:
1807 with self._indexfp('r') as ifh, self._datafp('w') as dfh:
1797 for r in self:
1808 for r in self:
1798 dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
1809 dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
1799
1810
1800 with self._indexfp('w') as fp:
1811 with self._indexfp('w') as fp:
1801 self.version &= ~FLAG_INLINE_DATA
1812 self.version &= ~FLAG_INLINE_DATA
1802 self._inline = False
1813 self._inline = False
1803 io = self._io
1814 io = self._io
1804 for i in self:
1815 for i in self:
1805 e = io.packentry(self.index[i], self.node, self.version, i)
1816 e = io.packentry(self.index[i], self.node, self.version, i)
1806 fp.write(e)
1817 fp.write(e)
1807
1818
1808 # the temp file replaces the real index when we exit the context
1819 # the temp file replaces the real index when we exit the context
1809 # manager
1820 # manager
1810
1821
1811 tr.replace(self.indexfile, trindex * self._io.size)
1822 tr.replace(self.indexfile, trindex * self._io.size)
1812 self._chunkclear()
1823 self._chunkclear()
1813
1824
1814 def _nodeduplicatecallback(self, transaction, node):
1825 def _nodeduplicatecallback(self, transaction, node):
1815 """called when trying to add a node already stored.
1826 """called when trying to add a node already stored.
1816 """
1827 """
1817
1828
1818 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1829 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1819 node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None,
1830 node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None,
1820 sidedata=None):
1831 sidedata=None):
1821 """add a revision to the log
1832 """add a revision to the log
1822
1833
1823 text - the revision data to add
1834 text - the revision data to add
1824 transaction - the transaction object used for rollback
1835 transaction - the transaction object used for rollback
1825 link - the linkrev data to add
1836 link - the linkrev data to add
1826 p1, p2 - the parent nodeids of the revision
1837 p1, p2 - the parent nodeids of the revision
1827 cachedelta - an optional precomputed delta
1838 cachedelta - an optional precomputed delta
1828 node - nodeid of revision; typically node is not specified, and it is
1839 node - nodeid of revision; typically node is not specified, and it is
1829 computed by default as hash(text, p1, p2), however subclasses might
1840 computed by default as hash(text, p1, p2), however subclasses might
1830 use a different hashing method (and override checkhash() in such a case)
1841 use a different hashing method (and override checkhash() in such a case)
1831 flags - the known flags to set on the revision
1842 flags - the known flags to set on the revision
1832 deltacomputer - an optional deltacomputer instance shared between
1843 deltacomputer - an optional deltacomputer instance shared between
1833 multiple calls
1844 multiple calls
1834 """
1845 """
1835 if link == nullrev:
1846 if link == nullrev:
1836 raise error.RevlogError(_("attempted to add linkrev -1 to %s")
1847 raise error.RevlogError(_("attempted to add linkrev -1 to %s")
1837 % self.indexfile)
1848 % self.indexfile)
1838
1849
1839 if sidedata is None:
1850 if sidedata is None:
1840 sidedata = {}
1851 sidedata = {}
1841
1852
1842 if flags:
1853 if flags:
1843 node = node or self.hash(text, p1, p2)
1854 node = node or self.hash(text, p1, p2)
1844
1855
1845 rawtext, validatehash = flagutil.processflagswrite(self, text, flags,
1856 rawtext, validatehash = flagutil.processflagswrite(self, text, flags,
1846 sidedata=sidedata)
1857 sidedata=sidedata)
1847
1858
1848 # If the flag processor modifies the revision data, ignore any provided
1859 # If the flag processor modifies the revision data, ignore any provided
1849 # cachedelta.
1860 # cachedelta.
1850 if rawtext != text:
1861 if rawtext != text:
1851 cachedelta = None
1862 cachedelta = None
1852
1863
1853 if len(rawtext) > _maxentrysize:
1864 if len(rawtext) > _maxentrysize:
1854 raise error.RevlogError(
1865 raise error.RevlogError(
1855 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
1866 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
1856 % (self.indexfile, len(rawtext)))
1867 % (self.indexfile, len(rawtext)))
1857
1868
1858 node = node or self.hash(rawtext, p1, p2)
1869 node = node or self.hash(rawtext, p1, p2)
1859 if node in self.nodemap:
1870 if node in self.nodemap:
1860 return node
1871 return node
1861
1872
1862 if validatehash:
1873 if validatehash:
1863 self.checkhash(rawtext, node, p1=p1, p2=p2)
1874 self.checkhash(rawtext, node, p1=p1, p2=p2)
1864
1875
1865 return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
1876 return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
1866 flags, cachedelta=cachedelta,
1877 flags, cachedelta=cachedelta,
1867 deltacomputer=deltacomputer)
1878 deltacomputer=deltacomputer)
1868
1879
1869 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
1880 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
1870 cachedelta=None, deltacomputer=None):
1881 cachedelta=None, deltacomputer=None):
1871 """add a raw revision with known flags, node and parents
1882 """add a raw revision with known flags, node and parents
1872 useful when reusing a revision not stored in this revlog (ex: received
1883 useful when reusing a revision not stored in this revlog (ex: received
1873 over the wire, or read from an external bundle).
1884 over the wire, or read from an external bundle).
1874 """
1885 """
1875 dfh = None
1886 dfh = None
1876 if not self._inline:
1887 if not self._inline:
1877 dfh = self._datafp("a+")
1888 dfh = self._datafp("a+")
1878 ifh = self._indexfp("a+")
1889 ifh = self._indexfp("a+")
1879 try:
1890 try:
1880 return self._addrevision(node, rawtext, transaction, link, p1, p2,
1891 return self._addrevision(node, rawtext, transaction, link, p1, p2,
1881 flags, cachedelta, ifh, dfh,
1892 flags, cachedelta, ifh, dfh,
1882 deltacomputer=deltacomputer)
1893 deltacomputer=deltacomputer)
1883 finally:
1894 finally:
1884 if dfh:
1895 if dfh:
1885 dfh.close()
1896 dfh.close()
1886 ifh.close()
1897 ifh.close()
1887
1898
1888 def compress(self, data):
1899 def compress(self, data):
1889 """Generate a possibly-compressed representation of data."""
1900 """Generate a possibly-compressed representation of data."""
1890 if not data:
1901 if not data:
1891 return '', data
1902 return '', data
1892
1903
1893 compressed = self._compressor.compress(data)
1904 compressed = self._compressor.compress(data)
1894
1905
1895 if compressed:
1906 if compressed:
1896 # The revlog compressor added the header in the returned data.
1907 # The revlog compressor added the header in the returned data.
1897 return '', compressed
1908 return '', compressed
1898
1909
1899 if data[0:1] == '\0':
1910 if data[0:1] == '\0':
1900 return '', data
1911 return '', data
1901 return 'u', data
1912 return 'u', data
1902
1913
1903 def decompress(self, data):
1914 def decompress(self, data):
1904 """Decompress a revlog chunk.
1915 """Decompress a revlog chunk.
1905
1916
1906 The chunk is expected to begin with a header identifying the
1917 The chunk is expected to begin with a header identifying the
1907 format type so it can be routed to an appropriate decompressor.
1918 format type so it can be routed to an appropriate decompressor.
1908 """
1919 """
1909 if not data:
1920 if not data:
1910 return data
1921 return data
1911
1922
1912 # Revlogs are read much more frequently than they are written and many
1923 # Revlogs are read much more frequently than they are written and many
1913 # chunks only take microseconds to decompress, so performance is
1924 # chunks only take microseconds to decompress, so performance is
1914 # important here.
1925 # important here.
1915 #
1926 #
1916 # We can make a few assumptions about revlogs:
1927 # We can make a few assumptions about revlogs:
1917 #
1928 #
1918 # 1) the majority of chunks will be compressed (as opposed to inline
1929 # 1) the majority of chunks will be compressed (as opposed to inline
1919 # raw data).
1930 # raw data).
1920 # 2) decompressing *any* data will likely be at least 10x slower than
1931 # 2) decompressing *any* data will likely be at least 10x slower than
1921 # returning raw inline data.
1932 # returning raw inline data.
1922 # 3) we want to prioritize common and officially supported compression
1933 # 3) we want to prioritize common and officially supported compression
1923 # engines
1934 # engines
1924 #
1935 #
1925 # It follows that we want to optimize for "decompress compressed data
1936 # It follows that we want to optimize for "decompress compressed data
1926 # when encoded with common and officially supported compression engines"
1937 # when encoded with common and officially supported compression engines"
1927 # case over "raw data" and "data encoded by less common or non-official
1938 # case over "raw data" and "data encoded by less common or non-official
1928 # compression engines." That is why we have the inline lookup first
1939 # compression engines." That is why we have the inline lookup first
1929 # followed by the compengines lookup.
1940 # followed by the compengines lookup.
1930 #
1941 #
1931 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
1942 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
1932 # compressed chunks. And this matters for changelog and manifest reads.
1943 # compressed chunks. And this matters for changelog and manifest reads.
1933 t = data[0:1]
1944 t = data[0:1]
1934
1945
1935 if t == 'x':
1946 if t == 'x':
1936 try:
1947 try:
1937 return _zlibdecompress(data)
1948 return _zlibdecompress(data)
1938 except zlib.error as e:
1949 except zlib.error as e:
1939 raise error.RevlogError(_('revlog decompress error: %s') %
1950 raise error.RevlogError(_('revlog decompress error: %s') %
1940 stringutil.forcebytestr(e))
1951 stringutil.forcebytestr(e))
1941 # '\0' is more common than 'u' so it goes first.
1952 # '\0' is more common than 'u' so it goes first.
1942 elif t == '\0':
1953 elif t == '\0':
1943 return data
1954 return data
1944 elif t == 'u':
1955 elif t == 'u':
1945 return util.buffer(data, 1)
1956 return util.buffer(data, 1)
1946
1957
1947 try:
1958 try:
1948 compressor = self._decompressors[t]
1959 compressor = self._decompressors[t]
1949 except KeyError:
1960 except KeyError:
1950 try:
1961 try:
1951 engine = util.compengines.forrevlogheader(t)
1962 engine = util.compengines.forrevlogheader(t)
1952 compressor = engine.revlogcompressor(self._compengineopts)
1963 compressor = engine.revlogcompressor(self._compengineopts)
1953 self._decompressors[t] = compressor
1964 self._decompressors[t] = compressor
1954 except KeyError:
1965 except KeyError:
1955 raise error.RevlogError(_('unknown compression type %r') % t)
1966 raise error.RevlogError(_('unknown compression type %r') % t)
1956
1967
1957 return compressor.decompress(data)
1968 return compressor.decompress(data)
1958
1969
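# --- Editor's illustrative sketch (not part of this changeset): why
# decompress() above can route on the first byte. zlib output starts with
# 'x', the uncompressed fallback written by compress() carries a 'u' prefix,
# and a chunk starting with '\0' is stored data that needs no prefix at all.
import zlib

compressed = zlib.compress(b'some revision data' * 10)
assert compressed[0:1] == b'x'        # handled by the zlib fast path

stored = b'u' + b'short, incompressible data'
assert stored[0:1] == b'u'            # decompress() strips the prefix byte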
1959 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
1970 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
1960 cachedelta, ifh, dfh, alwayscache=False,
1971 cachedelta, ifh, dfh, alwayscache=False,
1961 deltacomputer=None):
1972 deltacomputer=None):
1962 """internal function to add revisions to the log
1973 """internal function to add revisions to the log
1963
1974
1964 see addrevision for argument descriptions.
1975 see addrevision for argument descriptions.
1965
1976
1966 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
1977 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
1967
1978
1968 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
1979 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
1969 be used.
1980 be used.
1970
1981
1971 invariants:
1982 invariants:
1972 - rawtext is optional (can be None); if not set, cachedelta must be set.
1983 - rawtext is optional (can be None); if not set, cachedelta must be set.
1973 if both are set, they must correspond to each other.
1984 if both are set, they must correspond to each other.
1974 """
1985 """
1975 if node == nullid:
1986 if node == nullid:
1976 raise error.RevlogError(_("%s: attempt to add null revision") %
1987 raise error.RevlogError(_("%s: attempt to add null revision") %
1977 self.indexfile)
1988 self.indexfile)
1978 if node == wdirid or node in wdirfilenodeids:
1989 if node == wdirid or node in wdirfilenodeids:
1979 raise error.RevlogError(_("%s: attempt to add wdir revision") %
1990 raise error.RevlogError(_("%s: attempt to add wdir revision") %
1980 self.indexfile)
1991 self.indexfile)
1981
1992
1982 if self._inline:
1993 if self._inline:
1983 fh = ifh
1994 fh = ifh
1984 else:
1995 else:
1985 fh = dfh
1996 fh = dfh
1986
1997
1987 btext = [rawtext]
1998 btext = [rawtext]
1988
1999
1989 curr = len(self)
2000 curr = len(self)
1990 prev = curr - 1
2001 prev = curr - 1
1991 offset = self.end(prev)
2002 offset = self.end(prev)
1992 p1r, p2r = self.rev(p1), self.rev(p2)
2003 p1r, p2r = self.rev(p1), self.rev(p2)
1993
2004
1994 # full versions are inserted when the needed deltas
2005 # full versions are inserted when the needed deltas
1995 # become comparable to the uncompressed text
2006 # become comparable to the uncompressed text
1996 if rawtext is None:
2007 if rawtext is None:
1997 # need the rawtext size before it is changed by flag processors, which is
2008 # need the rawtext size before it is changed by flag processors, which is
1998 # the non-raw size. use revlog explicitly to avoid filelog's extra
2009 # the non-raw size. use revlog explicitly to avoid filelog's extra
1999 # logic that might remove metadata size.
2010 # logic that might remove metadata size.
2000 textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
2011 textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
2001 cachedelta[1])
2012 cachedelta[1])
2002 else:
2013 else:
2003 textlen = len(rawtext)
2014 textlen = len(rawtext)
2004
2015
2005 if deltacomputer is None:
2016 if deltacomputer is None:
2006 deltacomputer = deltautil.deltacomputer(self)
2017 deltacomputer = deltautil.deltacomputer(self)
2007
2018
2008 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2019 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2009
2020
2010 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2021 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2011
2022
2012 e = (offset_type(offset, flags), deltainfo.deltalen, textlen,
2023 e = (offset_type(offset, flags), deltainfo.deltalen, textlen,
2013 deltainfo.base, link, p1r, p2r, node)
2024 deltainfo.base, link, p1r, p2r, node)
2014 self.index.append(e)
2025 self.index.append(e)
2015 self.nodemap[node] = curr
2026 self.nodemap[node] = curr
2016
2027
2017 # Reset the pure node cache start lookup offset to account for new
2028 # Reset the pure node cache start lookup offset to account for new
2018 # revision.
2029 # revision.
2019 if self._nodepos is not None:
2030 if self._nodepos is not None:
2020 self._nodepos = curr
2031 self._nodepos = curr
2021
2032
2022 entry = self._io.packentry(e, self.node, self.version, curr)
2033 entry = self._io.packentry(e, self.node, self.version, curr)
2023 self._writeentry(transaction, ifh, dfh, entry, deltainfo.data,
2034 self._writeentry(transaction, ifh, dfh, entry, deltainfo.data,
2024 link, offset)
2035 link, offset)
2025
2036
2026 rawtext = btext[0]
2037 rawtext = btext[0]
2027
2038
2028 if alwayscache and rawtext is None:
2039 if alwayscache and rawtext is None:
2029 rawtext = deltacomputer.buildtext(revinfo, fh)
2040 rawtext = deltacomputer.buildtext(revinfo, fh)
2030
2041
2031 if type(rawtext) == bytes: # only accept immutable objects
2042 if type(rawtext) == bytes: # only accept immutable objects
2032 self._revisioncache = (node, curr, rawtext)
2043 self._revisioncache = (node, curr, rawtext)
2033 self._chainbasecache[curr] = deltainfo.chainbase
2044 self._chainbasecache[curr] = deltainfo.chainbase
2034 return node
2045 return node
2035
2046
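# --- Editor's illustrative sketch (not part of this changeset): the packing
# behind the first field of the index entry built above. offset_type() keeps
# the data-file offset in the high bits and the 16-bit revision flags in the
# low bits, which is why _getsegmentforrevs() recovers the offset with
# `>> 16`. REVIDX_ISCENSORED is one of the revlog flag constants (1 << 15).
REVIDX_ISCENSORED = 1 << 15
offset, flags = 4096, REVIDX_ISCENSORED
packed = (offset << 16) | flags
assert packed >> 16 == offset
assert packed & 0xFFFF == flags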
2036 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
2047 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
2037 # Files opened in a+ mode have inconsistent behavior on various
2048 # Files opened in a+ mode have inconsistent behavior on various
2038 # platforms. Windows requires that a file positioning call be made
2049 # platforms. Windows requires that a file positioning call be made
2039 # when the file handle transitions between reads and writes. See
2050 # when the file handle transitions between reads and writes. See
2040 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2051 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2041 # platforms, Python or the platform itself can be buggy. Some versions
2052 # platforms, Python or the platform itself can be buggy. Some versions
2042 # of Solaris have been observed to not append at the end of the file
2053 # of Solaris have been observed to not append at the end of the file
2043 # if the file was seeked to before the end. See issue4943 for more.
2054 # if the file was seeked to before the end. See issue4943 for more.
2044 #
2055 #
2045 # We work around this issue by inserting a seek() before writing.
2056 # We work around this issue by inserting a seek() before writing.
2046 # Note: This is likely not necessary on Python 3. However, because
2057 # Note: This is likely not necessary on Python 3. However, because
2047 # the file handle is reused for reads and may be seeked there, we need
2058 # the file handle is reused for reads and may be seeked there, we need
2048 # to be careful before changing this.
2059 # to be careful before changing this.
2049 ifh.seek(0, os.SEEK_END)
2060 ifh.seek(0, os.SEEK_END)
2050 if dfh:
2061 if dfh:
2051 dfh.seek(0, os.SEEK_END)
2062 dfh.seek(0, os.SEEK_END)
2052
2063
2053 curr = len(self) - 1
2064 curr = len(self) - 1
2054 if not self._inline:
2065 if not self._inline:
2055 transaction.add(self.datafile, offset)
2066 transaction.add(self.datafile, offset)
2056 transaction.add(self.indexfile, curr * len(entry))
2067 transaction.add(self.indexfile, curr * len(entry))
2057 if data[0]:
2068 if data[0]:
2058 dfh.write(data[0])
2069 dfh.write(data[0])
2059 dfh.write(data[1])
2070 dfh.write(data[1])
2060 ifh.write(entry)
2071 ifh.write(entry)
2061 else:
2072 else:
2062 offset += curr * self._io.size
2073 offset += curr * self._io.size
2063 transaction.add(self.indexfile, offset, curr)
2074 transaction.add(self.indexfile, offset, curr)
2064 ifh.write(entry)
2075 ifh.write(entry)
2065 ifh.write(data[0])
2076 ifh.write(data[0])
2066 ifh.write(data[1])
2077 ifh.write(data[1])
2067 self._enforceinlinesize(transaction, ifh)
2078 self._enforceinlinesize(transaction, ifh)
2068
2079
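# --- Editor's illustrative sketch (not part of this changeset): the
# seek-before-write pattern used by _writeentry() above. A handle opened in
# 'a+' mode and then used for reads may be left positioned away from the
# end, so an explicit seek to SEEK_END before writing keeps appends
# well-defined everywhere. Paths and data are made up for the example.
import os, tempfile

path = os.path.join(tempfile.mkdtemp(), 'toy.i')
with open(path, 'wb') as fh:
    fh.write(b'existing entry')
with open(path, 'a+b') as fh:
    fh.seek(0)                      # a read elsewhere moved the position...
    fh.read(4)
    fh.seek(0, os.SEEK_END)         # ...so reposition before appending
    fh.write(b'|new entry')
with open(path, 'rb') as fh:
    assert fh.read() == b'existing entry|new entry'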
2069 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
2080 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
2070 """
2081 """
2071 add a delta group
2082 add a delta group
2072
2083
2073 given a set of deltas, add them to the revision log. the
2084 given a set of deltas, add them to the revision log. the
2074 first delta is against its parent, which should be in our
2085 first delta is against its parent, which should be in our
2075 log, the rest are against the previous delta.
2086 log, the rest are against the previous delta.
2076
2087
2077 If ``addrevisioncb`` is defined, it will be called with arguments of
2088 If ``addrevisioncb`` is defined, it will be called with arguments of
2078 this revlog and the node that was added.
2089 this revlog and the node that was added.
2079 """
2090 """
2080
2091
2081 if self._writinghandles:
2092 if self._writinghandles:
2082 raise error.ProgrammingError('cannot nest addgroup() calls')
2093 raise error.ProgrammingError('cannot nest addgroup() calls')
2083
2094
2084 nodes = []
2095 nodes = []
2085
2096
2086 r = len(self)
2097 r = len(self)
2087 end = 0
2098 end = 0
2088 if r:
2099 if r:
2089 end = self.end(r - 1)
2100 end = self.end(r - 1)
2090 ifh = self._indexfp("a+")
2101 ifh = self._indexfp("a+")
2091 isize = r * self._io.size
2102 isize = r * self._io.size
2092 if self._inline:
2103 if self._inline:
2093 transaction.add(self.indexfile, end + isize, r)
2104 transaction.add(self.indexfile, end + isize, r)
2094 dfh = None
2105 dfh = None
2095 else:
2106 else:
2096 transaction.add(self.indexfile, isize, r)
2107 transaction.add(self.indexfile, isize, r)
2097 transaction.add(self.datafile, end)
2108 transaction.add(self.datafile, end)
2098 dfh = self._datafp("a+")
2109 dfh = self._datafp("a+")
2099 def flush():
2110 def flush():
2100 if dfh:
2111 if dfh:
2101 dfh.flush()
2112 dfh.flush()
2102 ifh.flush()
2113 ifh.flush()
2103
2114
2104 self._writinghandles = (ifh, dfh)
2115 self._writinghandles = (ifh, dfh)
2105
2116
2106 try:
2117 try:
2107 deltacomputer = deltautil.deltacomputer(self)
2118 deltacomputer = deltautil.deltacomputer(self)
2108 # loop through our set of deltas
2119 # loop through our set of deltas
2109 for data in deltas:
2120 for data in deltas:
2110 node, p1, p2, linknode, deltabase, delta, flags = data
2121 node, p1, p2, linknode, deltabase, delta, flags = data
2111 link = linkmapper(linknode)
2122 link = linkmapper(linknode)
2112 flags = flags or REVIDX_DEFAULT_FLAGS
2123 flags = flags or REVIDX_DEFAULT_FLAGS
2113
2124
2114 nodes.append(node)
2125 nodes.append(node)
2115
2126
2116 if node in self.nodemap:
2127 if node in self.nodemap:
2117 self._nodeduplicatecallback(transaction, node)
2128 self._nodeduplicatecallback(transaction, node)
2118 # this can happen if two branches make the same change
2129 # this can happen if two branches make the same change
2119 continue
2130 continue
2120
2131
2121 for p in (p1, p2):
2132 for p in (p1, p2):
2122 if p not in self.nodemap:
2133 if p not in self.nodemap:
2123 raise error.LookupError(p, self.indexfile,
2134 raise error.LookupError(p, self.indexfile,
2124 _('unknown parent'))
2135 _('unknown parent'))
2125
2136
2126 if deltabase not in self.nodemap:
2137 if deltabase not in self.nodemap:
2127 raise error.LookupError(deltabase, self.indexfile,
2138 raise error.LookupError(deltabase, self.indexfile,
2128 _('unknown delta base'))
2139 _('unknown delta base'))
2129
2140
2130 baserev = self.rev(deltabase)
2141 baserev = self.rev(deltabase)
2131
2142
2132 if baserev != nullrev and self.iscensored(baserev):
2143 if baserev != nullrev and self.iscensored(baserev):
2133 # if base is censored, delta must be full replacement in a
2144 # if base is censored, delta must be full replacement in a
2134 # single patch operation
2145 # single patch operation
2135 hlen = struct.calcsize(">lll")
2146 hlen = struct.calcsize(">lll")
2136 oldlen = self.rawsize(baserev)
2147 oldlen = self.rawsize(baserev)
2137 newlen = len(delta) - hlen
2148 newlen = len(delta) - hlen
2138 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2149 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2139 raise error.CensoredBaseError(self.indexfile,
2150 raise error.CensoredBaseError(self.indexfile,
2140 self.node(baserev))
2151 self.node(baserev))
2141
2152
2142 if not flags and self._peek_iscensored(baserev, delta, flush):
2153 if not flags and self._peek_iscensored(baserev, delta, flush):
2143 flags |= REVIDX_ISCENSORED
2154 flags |= REVIDX_ISCENSORED
2144
2155
2145 # We assume consumers of addrevisioncb will want to retrieve
2156 # We assume consumers of addrevisioncb will want to retrieve
2146 # the added revision, which will require a call to
2157 # the added revision, which will require a call to
2147 # revision(). revision() will fast path if there is a cache
2158 # revision(). revision() will fast path if there is a cache
2148 # hit. So, we tell _addrevision() to always cache in this case.
2159 # hit. So, we tell _addrevision() to always cache in this case.
2149 # We're only using addgroup() in the context of changegroup
2160 # We're only using addgroup() in the context of changegroup
2150 # generation so the revision data can always be handled as raw
2161 # generation so the revision data can always be handled as raw
2151 # by the flagprocessor.
2162 # by the flagprocessor.
2152 self._addrevision(node, None, transaction, link,
2163 self._addrevision(node, None, transaction, link,
2153 p1, p2, flags, (baserev, delta),
2164 p1, p2, flags, (baserev, delta),
2154 ifh, dfh,
2165 ifh, dfh,
2155 alwayscache=bool(addrevisioncb),
2166 alwayscache=bool(addrevisioncb),
2156 deltacomputer=deltacomputer)
2167 deltacomputer=deltacomputer)
2157
2168
2158 if addrevisioncb:
2169 if addrevisioncb:
2159 addrevisioncb(self, node)
2170 addrevisioncb(self, node)
2160
2171
2161 if not dfh and not self._inline:
2172 if not dfh and not self._inline:
2162 # addrevision switched from inline to conventional
2173 # addrevision switched from inline to conventional
2163 # reopen the index
2174 # reopen the index
2164 ifh.close()
2175 ifh.close()
2165 dfh = self._datafp("a+")
2176 dfh = self._datafp("a+")
2166 ifh = self._indexfp("a+")
2177 ifh = self._indexfp("a+")
2167 self._writinghandles = (ifh, dfh)
2178 self._writinghandles = (ifh, dfh)
2168 finally:
2179 finally:
2169 self._writinghandles = None
2180 self._writinghandles = None
2170
2181
2171 if dfh:
2182 if dfh:
2172 dfh.close()
2183 dfh.close()
2173 ifh.close()
2184 ifh.close()
2174
2185
2175 return nodes
2186 return nodes
2176
2187
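# --- Editor's note (not part of this changeset): the shape of the items
# addgroup() above consumes. Every element of `deltas` unpacks as
# (node, p1, p2, linknode, deltabase, delta, flags), and `linkmapper`
# translates a changelog node into the linkrev to store. All names below are
# placeholders, not data from a real repository.
#
#   def linkmapper(linknode):
#       return changelog_rev_by_node[linknode]
#
#   deltas = [
#       (node, p1, p2, linknode, deltabase, deltabytes, 0),
#   ]
#   with repo.transaction('toy-unbundle') as tr:
#       added = rl.addgroup(deltas, linkmapper, tr)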
2177 def iscensored(self, rev):
2188 def iscensored(self, rev):
2178 """Check if a file revision is censored."""
2189 """Check if a file revision is censored."""
2179 if not self._censorable:
2190 if not self._censorable:
2180 return False
2191 return False
2181
2192
2182 return self.flags(rev) & REVIDX_ISCENSORED
2193 return self.flags(rev) & REVIDX_ISCENSORED
2183
2194
2184 def _peek_iscensored(self, baserev, delta, flush):
2195 def _peek_iscensored(self, baserev, delta, flush):
2185 """Quickly check if a delta produces a censored revision."""
2196 """Quickly check if a delta produces a censored revision."""
2186 if not self._censorable:
2197 if not self._censorable:
2187 return False
2198 return False
2188
2199
2189 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2200 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2190
2201
2191 def getstrippoint(self, minlink):
2202 def getstrippoint(self, minlink):
2192 """find the minimum rev that must be stripped to strip the linkrev
2203 """find the minimum rev that must be stripped to strip the linkrev
2193
2204
2194 Returns a tuple containing the minimum rev and a set of all revs that
2205 Returns a tuple containing the minimum rev and a set of all revs that
2195 have linkrevs that will be broken by this strip.
2206 have linkrevs that will be broken by this strip.
2196 """
2207 """
2197 return storageutil.resolvestripinfo(minlink, len(self) - 1,
2208 return storageutil.resolvestripinfo(minlink, len(self) - 1,
2198 self.headrevs(),
2209 self.headrevs(),
2199 self.linkrev, self.parentrevs)
2210 self.linkrev, self.parentrevs)
2200
2211
2201 def strip(self, minlink, transaction):
2212 def strip(self, minlink, transaction):
2202 """truncate the revlog on the first revision with a linkrev >= minlink
2213 """truncate the revlog on the first revision with a linkrev >= minlink
2203
2214
2204 This function is called when we're stripping revision minlink and
2215 This function is called when we're stripping revision minlink and
2205 its descendants from the repository.
2216 its descendants from the repository.
2206
2217
2207 We have to remove all revisions with linkrev >= minlink, because
2218 We have to remove all revisions with linkrev >= minlink, because
2208 the equivalent changelog revisions will be renumbered after the
2219 the equivalent changelog revisions will be renumbered after the
2209 strip.
2220 strip.
2210
2221
2211 So we truncate the revlog on the first of these revisions, and
2222 So we truncate the revlog on the first of these revisions, and
2212 trust that the caller has saved the revisions that shouldn't be
2223 trust that the caller has saved the revisions that shouldn't be
2213 removed and that it'll re-add them after this truncation.
2224 removed and that it'll re-add them after this truncation.
2214 """
2225 """
2215 if len(self) == 0:
2226 if len(self) == 0:
2216 return
2227 return
2217
2228
2218 rev, _ = self.getstrippoint(minlink)
2229 rev, _ = self.getstrippoint(minlink)
2219 if rev == len(self):
2230 if rev == len(self):
2220 return
2231 return
2221
2232
2222 # first truncate the files on disk
2233 # first truncate the files on disk
2223 end = self.start(rev)
2234 end = self.start(rev)
2224 if not self._inline:
2235 if not self._inline:
2225 transaction.add(self.datafile, end)
2236 transaction.add(self.datafile, end)
2226 end = rev * self._io.size
2237 end = rev * self._io.size
2227 else:
2238 else:
2228 end += rev * self._io.size
2239 end += rev * self._io.size
2229
2240
2230 transaction.add(self.indexfile, end)
2241 transaction.add(self.indexfile, end)
2231
2242
2232 # then reset internal state in memory to forget those revisions
2243 # then reset internal state in memory to forget those revisions
2233 self._revisioncache = None
2244 self._revisioncache = None
2234 self._chaininfocache = {}
2245 self._chaininfocache = {}
2235 self._chunkclear()
2246 self._chunkclear()
2236 for x in pycompat.xrange(rev, len(self)):
2247 for x in pycompat.xrange(rev, len(self)):
2237 del self.nodemap[self.node(x)]
2248 del self.nodemap[self.node(x)]
2238
2249
2239 del self.index[rev:-1]
2250 del self.index[rev:-1]
2240 self._nodepos = None
2251 self._nodepos = None
2241
2252
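# --- editorial sketch, not part of the changeset ---
# A minimal illustration of the getstrippoint()/strip() contract described
# in the docstrings above. `rl`, `tr` and `minlink` are assumptions: an open
# revlog, an open transaction and the first changelog revision being stripped.
striprev, brokenrevs = rl.getstrippoint(minlink)
if striprev < len(rl):
    # revisions in `brokenrevs` keep linkrevs that the strip will invalidate;
    # the caller is expected to have saved them so they can be re-added later
    rl.strip(minlink, tr)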
2242 def checksize(self):
2253 def checksize(self):
2243 """Check size of index and data files
2254 """Check size of index and data files
2244
2255
2245 return a (dd, di) tuple.
2256 return a (dd, di) tuple.
2246 - dd: extra bytes for the "data" file
2257 - dd: extra bytes for the "data" file
2247 - di: extra bytes for the "index" file
2258 - di: extra bytes for the "index" file
2248
2259
2249 A healthy revlog will return (0, 0).
2260 A healthy revlog will return (0, 0).
2250 """
2261 """
2251 expected = 0
2262 expected = 0
2252 if len(self):
2263 if len(self):
2253 expected = max(0, self.end(len(self) - 1))
2264 expected = max(0, self.end(len(self) - 1))
2254
2265
2255 try:
2266 try:
2256 with self._datafp() as f:
2267 with self._datafp() as f:
2257 f.seek(0, io.SEEK_END)
2268 f.seek(0, io.SEEK_END)
2258 actual = f.tell()
2269 actual = f.tell()
2259 dd = actual - expected
2270 dd = actual - expected
2260 except IOError as inst:
2271 except IOError as inst:
2261 if inst.errno != errno.ENOENT:
2272 if inst.errno != errno.ENOENT:
2262 raise
2273 raise
2263 dd = 0
2274 dd = 0
2264
2275
2265 try:
2276 try:
2266 f = self.opener(self.indexfile)
2277 f = self.opener(self.indexfile)
2267 f.seek(0, io.SEEK_END)
2278 f.seek(0, io.SEEK_END)
2268 actual = f.tell()
2279 actual = f.tell()
2269 f.close()
2280 f.close()
2270 s = self._io.size
2281 s = self._io.size
2271 i = max(0, actual // s)
2282 i = max(0, actual // s)
2272 di = actual - (i * s)
2283 di = actual - (i * s)
2273 if self._inline:
2284 if self._inline:
2274 databytes = 0
2285 databytes = 0
2275 for r in self:
2286 for r in self:
2276 databytes += max(0, self.length(r))
2287 databytes += max(0, self.length(r))
2277 dd = 0
2288 dd = 0
2278 di = actual - len(self) * s - databytes
2289 di = actual - len(self) * s - databytes
2279 except IOError as inst:
2290 except IOError as inst:
2280 if inst.errno != errno.ENOENT:
2291 if inst.errno != errno.ENOENT:
2281 raise
2292 raise
2282 di = 0
2293 di = 0
2283
2294
2284 return (dd, di)
2295 return (dd, di)
2285
2296
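# --- editorial sketch, not part of the changeset ---
# Interpreting the (dd, di) tuple documented above; `rl` is assumed to be an
# open revlog. A healthy revlog reports (0, 0).
dd, di = rl.checksize()
if dd:
    print('data file has %d unexpected trailing bytes' % dd)
if di:
    print('index file has %d unexpected trailing bytes' % di)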
2286 def files(self):
2297 def files(self):
2287 res = [self.indexfile]
2298 res = [self.indexfile]
2288 if not self._inline:
2299 if not self._inline:
2289 res.append(self.datafile)
2300 res.append(self.datafile)
2290 return res
2301 return res
2291
2302
2292 def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
2303 def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
2293 assumehaveparentrevisions=False,
2304 assumehaveparentrevisions=False,
2294 deltamode=repository.CG_DELTAMODE_STD):
2305 deltamode=repository.CG_DELTAMODE_STD):
2295 if nodesorder not in ('nodes', 'storage', 'linear', None):
2306 if nodesorder not in ('nodes', 'storage', 'linear', None):
2296 raise error.ProgrammingError('unhandled value for nodesorder: %s' %
2307 raise error.ProgrammingError('unhandled value for nodesorder: %s' %
2297 nodesorder)
2308 nodesorder)
2298
2309
2299 if nodesorder is None and not self._generaldelta:
2310 if nodesorder is None and not self._generaldelta:
2300 nodesorder = 'storage'
2311 nodesorder = 'storage'
2301
2312
2302 if (not self._storedeltachains and
2313 if (not self._storedeltachains and
2303 deltamode != repository.CG_DELTAMODE_PREV):
2314 deltamode != repository.CG_DELTAMODE_PREV):
2304 deltamode = repository.CG_DELTAMODE_FULL
2315 deltamode = repository.CG_DELTAMODE_FULL
2305
2316
2306 return storageutil.emitrevisions(
2317 return storageutil.emitrevisions(
2307 self, nodes, nodesorder, revlogrevisiondelta,
2318 self, nodes, nodesorder, revlogrevisiondelta,
2308 deltaparentfn=self.deltaparent,
2319 deltaparentfn=self.deltaparent,
2309 candeltafn=self.candelta,
2320 candeltafn=self.candelta,
2310 rawsizefn=self.rawsize,
2321 rawsizefn=self.rawsize,
2311 revdifffn=self.revdiff,
2322 revdifffn=self.revdiff,
2312 flagsfn=self.flags,
2323 flagsfn=self.flags,
2313 deltamode=deltamode,
2324 deltamode=deltamode,
2314 revisiondata=revisiondata,
2325 revisiondata=revisiondata,
2315 assumehaveparentrevisions=assumehaveparentrevisions)
2326 assumehaveparentrevisions=assumehaveparentrevisions)
2316
2327
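# --- editorial sketch, not part of the changeset ---
# Requesting full revision text from emitrevisions(); `rl` and `nodes` are
# assumptions of this illustration.
for rdelta in rl.emitrevisions(nodes, nodesorder='storage',
                               revisiondata=True,
                               deltamode=repository.CG_DELTAMODE_FULL):
    # with CG_DELTAMODE_FULL each emitted entry should carry the full
    # revision text rather than a delta against another revision
    assert rdelta.revision is not None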
2317 DELTAREUSEALWAYS = 'always'
2328 DELTAREUSEALWAYS = 'always'
2318 DELTAREUSESAMEREVS = 'samerevs'
2329 DELTAREUSESAMEREVS = 'samerevs'
2319 DELTAREUSENEVER = 'never'
2330 DELTAREUSENEVER = 'never'
2320
2331
2321 DELTAREUSEFULLADD = 'fulladd'
2332 DELTAREUSEFULLADD = 'fulladd'
2322
2333
2323 DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}
2334 DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}
2324
2335
2325 def clone(self, tr, destrevlog, addrevisioncb=None,
2336 def clone(self, tr, destrevlog, addrevisioncb=None,
2326 deltareuse=DELTAREUSESAMEREVS, forcedeltabothparents=None):
2337 deltareuse=DELTAREUSESAMEREVS, forcedeltabothparents=None):
2327 """Copy this revlog to another, possibly with format changes.
2338 """Copy this revlog to another, possibly with format changes.
2328
2339
2329 The destination revlog will contain the same revisions and nodes.
2340 The destination revlog will contain the same revisions and nodes.
2330 However, it may not be bit-for-bit identical due to e.g. delta encoding
2341 However, it may not be bit-for-bit identical due to e.g. delta encoding
2331 differences.
2342 differences.
2332
2343
2333 The ``deltareuse`` argument controls how deltas from the existing revlog
2344 The ``deltareuse`` argument controls how deltas from the existing revlog
2334 are preserved in the destination revlog. The argument can have the
2345 are preserved in the destination revlog. The argument can have the
2335 following values:
2346 following values:
2336
2347
2337 DELTAREUSEALWAYS
2348 DELTAREUSEALWAYS
2338 Deltas will always be reused (if possible), even if the destination
2349 Deltas will always be reused (if possible), even if the destination
2339 revlog would not select the same revisions for the delta. This is the
2350 revlog would not select the same revisions for the delta. This is the
2340 fastest mode of operation.
2351 fastest mode of operation.
2341 DELTAREUSESAMEREVS
2352 DELTAREUSESAMEREVS
2342 Deltas will be reused if the destination revlog would pick the same
2353 Deltas will be reused if the destination revlog would pick the same
2343 revisions for the delta. This mode strikes a balance between speed
2354 revisions for the delta. This mode strikes a balance between speed
2344 and optimization.
2355 and optimization.
2345 DELTAREUSENEVER
2356 DELTAREUSENEVER
2346 Deltas will never be reused. This is the slowest mode of execution.
2357 Deltas will never be reused. This is the slowest mode of execution.
2347 This mode can be used to recompute deltas (e.g. if the diff/delta
2358 This mode can be used to recompute deltas (e.g. if the diff/delta
2348 algorithm changes).
2359 algorithm changes).
2349
2360
2350 Delta computation can be slow, so the choice of delta reuse policy can
2361 Delta computation can be slow, so the choice of delta reuse policy can
2351 significantly affect run time.
2362 significantly affect run time.
2352
2363
2353 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2364 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2354 two extremes. Deltas will be reused if they are appropriate. But if the
2365 two extremes. Deltas will be reused if they are appropriate. But if the
2355 delta could choose a better revision, it will do so. This means if you
2366 delta could choose a better revision, it will do so. This means if you
2356 are converting a non-generaldelta revlog to a generaldelta revlog,
2367 are converting a non-generaldelta revlog to a generaldelta revlog,
2357 deltas will be recomputed if the delta's parent isn't a parent of the
2368 deltas will be recomputed if the delta's parent isn't a parent of the
2358 revision.
2369 revision.
2359
2370
2360 In addition to the delta policy, the ``forcedeltabothparents``
2371 In addition to the delta policy, the ``forcedeltabothparents``
2361 argument controls whether to force computing deltas against both parents
2372 argument controls whether to force computing deltas against both parents
2362 for merges. If unset, the destination revlog's existing setting is used.
2373 for merges. If unset, the destination revlog's existing setting is used.
2363 """
2374 """
2364 if deltareuse not in self.DELTAREUSEALL:
2375 if deltareuse not in self.DELTAREUSEALL:
2365 raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)
2376 raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)
2366
2377
2367 if len(destrevlog):
2378 if len(destrevlog):
2368 raise ValueError(_('destination revlog is not empty'))
2379 raise ValueError(_('destination revlog is not empty'))
2369
2380
2370 if getattr(self, 'filteredrevs', None):
2381 if getattr(self, 'filteredrevs', None):
2371 raise ValueError(_('source revlog has filtered revisions'))
2382 raise ValueError(_('source revlog has filtered revisions'))
2372 if getattr(destrevlog, 'filteredrevs', None):
2383 if getattr(destrevlog, 'filteredrevs', None):
2373 raise ValueError(_('destination revlog has filtered revisions'))
2384 raise ValueError(_('destination revlog has filtered revisions'))
2374
2385
2375 # lazydelta and lazydeltabase control whether to reuse a cached delta,
2386 # lazydelta and lazydeltabase control whether to reuse a cached delta,
2376 # if possible.
2387 # if possible.
2377 oldlazydelta = destrevlog._lazydelta
2388 oldlazydelta = destrevlog._lazydelta
2378 oldlazydeltabase = destrevlog._lazydeltabase
2389 oldlazydeltabase = destrevlog._lazydeltabase
2379 oldamd = destrevlog._deltabothparents
2390 oldamd = destrevlog._deltabothparents
2380
2391
2381 try:
2392 try:
2382 if deltareuse == self.DELTAREUSEALWAYS:
2393 if deltareuse == self.DELTAREUSEALWAYS:
2383 destrevlog._lazydeltabase = True
2394 destrevlog._lazydeltabase = True
2384 destrevlog._lazydelta = True
2395 destrevlog._lazydelta = True
2385 elif deltareuse == self.DELTAREUSESAMEREVS:
2396 elif deltareuse == self.DELTAREUSESAMEREVS:
2386 destrevlog._lazydeltabase = False
2397 destrevlog._lazydeltabase = False
2387 destrevlog._lazydelta = True
2398 destrevlog._lazydelta = True
2388 elif deltareuse == self.DELTAREUSENEVER:
2399 elif deltareuse == self.DELTAREUSENEVER:
2389 destrevlog._lazydeltabase = False
2400 destrevlog._lazydeltabase = False
2390 destrevlog._lazydelta = False
2401 destrevlog._lazydelta = False
2391
2402
2392 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2403 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2393
2404
2394 deltacomputer = deltautil.deltacomputer(destrevlog)
2405 deltacomputer = deltautil.deltacomputer(destrevlog)
2395 index = self.index
2406 index = self.index
2396 for rev in self:
2407 for rev in self:
2397 entry = index[rev]
2408 entry = index[rev]
2398
2409
2399 # Some classes override linkrev to take filtered revs into
2410 # Some classes override linkrev to take filtered revs into
2400 # account. Use raw entry from index.
2411 # account. Use raw entry from index.
2401 flags = entry[0] & 0xffff
2412 flags = entry[0] & 0xffff
2402 linkrev = entry[4]
2413 linkrev = entry[4]
2403 p1 = index[entry[5]][7]
2414 p1 = index[entry[5]][7]
2404 p2 = index[entry[6]][7]
2415 p2 = index[entry[6]][7]
2405 node = entry[7]
2416 node = entry[7]
2406
2417
2407 # (Possibly) reuse the delta from the revlog if allowed and
2418 # (Possibly) reuse the delta from the revlog if allowed and
2408 # the revlog chunk is a delta.
2419 # the revlog chunk is a delta.
2409 cachedelta = None
2420 cachedelta = None
2410 rawtext = None
2421 rawtext = None
2411 if (deltareuse != self.DELTAREUSEFULLADD
2422 if (deltareuse != self.DELTAREUSEFULLADD
2412 and destrevlog._lazydelta):
2423 and destrevlog._lazydelta):
2413 dp = self.deltaparent(rev)
2424 dp = self.deltaparent(rev)
2414 if dp != nullrev:
2425 if dp != nullrev:
2415 cachedelta = (dp, bytes(self._chunk(rev)))
2426 cachedelta = (dp, bytes(self._chunk(rev)))
2416
2427
2417 if not cachedelta:
2428 if not cachedelta:
2418 rawtext = self.rawdata(rev)
2429 rawtext = self.rawdata(rev)
2419
2430
2420
2431
2421 if deltareuse == self.DELTAREUSEFULLADD:
2432 if deltareuse == self.DELTAREUSEFULLADD:
2422 destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
2433 destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
2423 cachedelta=cachedelta,
2434 cachedelta=cachedelta,
2424 node=node, flags=flags,
2435 node=node, flags=flags,
2425 deltacomputer=deltacomputer)
2436 deltacomputer=deltacomputer)
2426 else:
2437 else:
2427 ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
2438 ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
2428 checkambig=False)
2439 checkambig=False)
2429 dfh = None
2440 dfh = None
2430 if not destrevlog._inline:
2441 if not destrevlog._inline:
2431 dfh = destrevlog.opener(destrevlog.datafile, 'a+')
2442 dfh = destrevlog.opener(destrevlog.datafile, 'a+')
2432 try:
2443 try:
2433 destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
2444 destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
2434 p2, flags, cachedelta, ifh, dfh,
2445 p2, flags, cachedelta, ifh, dfh,
2435 deltacomputer=deltacomputer)
2446 deltacomputer=deltacomputer)
2436 finally:
2447 finally:
2437 if dfh:
2448 if dfh:
2438 dfh.close()
2449 dfh.close()
2439 ifh.close()
2450 ifh.close()
2440
2451
2441 if addrevisioncb:
2452 if addrevisioncb:
2442 addrevisioncb(self, rev, node)
2453 addrevisioncb(self, rev, node)
2443 finally:
2454 finally:
2444 destrevlog._lazydelta = oldlazydelta
2455 destrevlog._lazydelta = oldlazydelta
2445 destrevlog._lazydeltabase = oldlazydeltabase
2456 destrevlog._lazydeltabase = oldlazydeltabase
2446 destrevlog._deltabothparents = oldamd
2457 destrevlog._deltabothparents = oldamd
2447
2458
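# --- editorial sketch, not part of the changeset ---
# Driving clone() with one of the delta reuse policies documented above.
# `srcrevlog`, `destrevlog`, `tr` and `ui` are assumptions of this sketch.
def _onaddrevision(rl, rev, node):
    # clone() invokes the callback as addrevisioncb(self, rev, node)
    ui.debug(b'copied revision %d\n' % rev)

srcrevlog.clone(tr, destrevlog,
                addrevisioncb=_onaddrevision,
                deltareuse=srcrevlog.DELTAREUSESAMEREVS)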
2448 def censorrevision(self, tr, censornode, tombstone=b''):
2459 def censorrevision(self, tr, censornode, tombstone=b''):
2449 if (self.version & 0xFFFF) == REVLOGV0:
2460 if (self.version & 0xFFFF) == REVLOGV0:
2450 raise error.RevlogError(_('cannot censor with version %d revlogs') %
2461 raise error.RevlogError(_('cannot censor with version %d revlogs') %
2451 self.version)
2462 self.version)
2452
2463
2453 censorrev = self.rev(censornode)
2464 censorrev = self.rev(censornode)
2454 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2465 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2455
2466
2456 if len(tombstone) > self.rawsize(censorrev):
2467 if len(tombstone) > self.rawsize(censorrev):
2457 raise error.Abort(_('censor tombstone must be no longer than '
2468 raise error.Abort(_('censor tombstone must be no longer than '
2458 'censored data'))
2469 'censored data'))
2459
2470
2460 # Rewriting the revlog in place is hard. Our strategy for censoring is
2471 # Rewriting the revlog in place is hard. Our strategy for censoring is
2461 # to create a new revlog, copy all revisions to it, then replace the
2472 # to create a new revlog, copy all revisions to it, then replace the
2462 # revlogs on transaction close.
2473 # revlogs on transaction close.
2463
2474
2464 newindexfile = self.indexfile + b'.tmpcensored'
2475 newindexfile = self.indexfile + b'.tmpcensored'
2465 newdatafile = self.datafile + b'.tmpcensored'
2476 newdatafile = self.datafile + b'.tmpcensored'
2466
2477
2467 # This is a bit dangerous. We could easily have a mismatch of state.
2478 # This is a bit dangerous. We could easily have a mismatch of state.
2468 newrl = revlog(self.opener, newindexfile, newdatafile,
2479 newrl = revlog(self.opener, newindexfile, newdatafile,
2469 censorable=True)
2480 censorable=True)
2470 newrl.version = self.version
2481 newrl.version = self.version
2471 newrl._generaldelta = self._generaldelta
2482 newrl._generaldelta = self._generaldelta
2472 newrl._io = self._io
2483 newrl._io = self._io
2473
2484
2474 for rev in self.revs():
2485 for rev in self.revs():
2475 node = self.node(rev)
2486 node = self.node(rev)
2476 p1, p2 = self.parents(node)
2487 p1, p2 = self.parents(node)
2477
2488
2478 if rev == censorrev:
2489 if rev == censorrev:
2479 newrl.addrawrevision(tombstone, tr, self.linkrev(censorrev),
2490 newrl.addrawrevision(tombstone, tr, self.linkrev(censorrev),
2480 p1, p2, censornode, REVIDX_ISCENSORED)
2491 p1, p2, censornode, REVIDX_ISCENSORED)
2481
2492
2482 if newrl.deltaparent(rev) != nullrev:
2493 if newrl.deltaparent(rev) != nullrev:
2483 raise error.Abort(_('censored revision stored as delta; '
2494 raise error.Abort(_('censored revision stored as delta; '
2484 'cannot censor'),
2495 'cannot censor'),
2485 hint=_('censoring of revlogs is not '
2496 hint=_('censoring of revlogs is not '
2486 'fully implemented; please report '
2497 'fully implemented; please report '
2487 'this bug'))
2498 'this bug'))
2488 continue
2499 continue
2489
2500
2490 if self.iscensored(rev):
2501 if self.iscensored(rev):
2491 if self.deltaparent(rev) != nullrev:
2502 if self.deltaparent(rev) != nullrev:
2492 raise error.Abort(_('cannot censor due to censored '
2503 raise error.Abort(_('cannot censor due to censored '
2493 'revision having delta stored'))
2504 'revision having delta stored'))
2494 rawtext = self._chunk(rev)
2505 rawtext = self._chunk(rev)
2495 else:
2506 else:
2496 rawtext = self.rawdata(rev)
2507 rawtext = self.rawdata(rev)
2497
2508
2498 newrl.addrawrevision(rawtext, tr, self.linkrev(rev), p1, p2, node,
2509 newrl.addrawrevision(rawtext, tr, self.linkrev(rev), p1, p2, node,
2499 self.flags(rev))
2510 self.flags(rev))
2500
2511
2501 tr.addbackup(self.indexfile, location='store')
2512 tr.addbackup(self.indexfile, location='store')
2502 if not self._inline:
2513 if not self._inline:
2503 tr.addbackup(self.datafile, location='store')
2514 tr.addbackup(self.datafile, location='store')
2504
2515
2505 self.opener.rename(newrl.indexfile, self.indexfile)
2516 self.opener.rename(newrl.indexfile, self.indexfile)
2506 if not self._inline:
2517 if not self._inline:
2507 self.opener.rename(newrl.datafile, self.datafile)
2518 self.opener.rename(newrl.datafile, self.datafile)
2508
2519
2509 self.clearcaches()
2520 self.clearcaches()
2510 self._loadindex()
2521 self._loadindex()
2511
2522
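# --- editorial sketch, not part of the changeset ---
# Invoking censorrevision() inside a transaction, following the strategy
# comment above (copy revisions into a temporary revlog, swap the files on
# transaction close). `repo`, `fl` (a censorable revlog) and `badnode` are
# assumptions of this illustration.
with repo.transaction(b'censor') as tr:
    fl.censorrevision(tr, badnode, tombstone=b'redacted by administrator')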
2512 def verifyintegrity(self, state):
2523 def verifyintegrity(self, state):
2513 """Verifies the integrity of the revlog.
2524 """Verifies the integrity of the revlog.
2514
2525
2515 Yields ``revlogproblem`` instances describing problems that are
2526 Yields ``revlogproblem`` instances describing problems that are
2516 found.
2527 found.
2517 """
2528 """
2518 dd, di = self.checksize()
2529 dd, di = self.checksize()
2519 if dd:
2530 if dd:
2520 yield revlogproblem(error=_('data length off by %d bytes') % dd)
2531 yield revlogproblem(error=_('data length off by %d bytes') % dd)
2521 if di:
2532 if di:
2522 yield revlogproblem(error=_('index contains %d extra bytes') % di)
2533 yield revlogproblem(error=_('index contains %d extra bytes') % di)
2523
2534
2524 version = self.version & 0xFFFF
2535 version = self.version & 0xFFFF
2525
2536
2526 # The verifier tells us what version revlog we should be.
2537 # The verifier tells us what version revlog we should be.
2527 if version != state['expectedversion']:
2538 if version != state['expectedversion']:
2528 yield revlogproblem(
2539 yield revlogproblem(
2529 warning=_("warning: '%s' uses revlog format %d; expected %d") %
2540 warning=_("warning: '%s' uses revlog format %d; expected %d") %
2530 (self.indexfile, version, state['expectedversion']))
2541 (self.indexfile, version, state['expectedversion']))
2531
2542
2532 state['skipread'] = set()
2543 state['skipread'] = set()
2533
2544
2534 for rev in self:
2545 for rev in self:
2535 node = self.node(rev)
2546 node = self.node(rev)
2536
2547
2537 # Verify contents. 4 cases to care about:
2548 # Verify contents. 4 cases to care about:
2538 #
2549 #
2539 # common: the most common case
2550 # common: the most common case
2540 # rename: with a rename
2551 # rename: with a rename
2541 # meta: file content starts with b'\1\n', the metadata
2552 # meta: file content starts with b'\1\n', the metadata
2542 # header defined in filelog.py, but without a rename
2553 # header defined in filelog.py, but without a rename
2543 # ext: content stored externally
2554 # ext: content stored externally
2544 #
2555 #
2545 # More formally, their differences are shown below:
2556 # More formally, their differences are shown below:
2546 #
2557 #
2547 # | common | rename | meta | ext
2558 # | common | rename | meta | ext
2548 # -------------------------------------------------------
2559 # -------------------------------------------------------
2549 # flags() | 0 | 0 | 0 | not 0
2560 # flags() | 0 | 0 | 0 | not 0
2550 # renamed() | False | True | False | ?
2561 # renamed() | False | True | False | ?
2551 # rawtext[0:2]=='\1\n'| False | True | True | ?
2562 # rawtext[0:2]=='\1\n'| False | True | True | ?
2552 #
2563 #
2553 # "rawtext" means the raw text stored in revlog data, which
2564 # "rawtext" means the raw text stored in revlog data, which
2554 # could be retrieved by "rawdata(rev)". "text"
2565 # could be retrieved by "rawdata(rev)". "text"
2555 # mentioned below is "revision(rev)".
2566 # mentioned below is "revision(rev)".
2556 #
2567 #
2557 # There are 3 different lengths stored physically:
2568 # There are 3 different lengths stored physically:
2558 # 1. L1: rawsize, stored in revlog index
2569 # 1. L1: rawsize, stored in revlog index
2559 # 2. L2: len(rawtext), stored in revlog data
2570 # 2. L2: len(rawtext), stored in revlog data
2560 # 3. L3: len(text), stored in revlog data if flags==0, or
2571 # 3. L3: len(text), stored in revlog data if flags==0, or
2561 # possibly somewhere else if flags!=0
2572 # possibly somewhere else if flags!=0
2562 #
2573 #
2563 # L1 should be equal to L2. L3 could be different from them.
2574 # L1 should be equal to L2. L3 could be different from them.
2564 # "text" may or may not affect commit hash depending on flag
2575 # "text" may or may not affect commit hash depending on flag
2565 # processors (see flagutil.addflagprocessor).
2576 # processors (see flagutil.addflagprocessor).
2566 #
2577 #
2567 # | common | rename | meta | ext
2578 # | common | rename | meta | ext
2568 # -------------------------------------------------
2579 # -------------------------------------------------
2569 # rawsize() | L1 | L1 | L1 | L1
2580 # rawsize() | L1 | L1 | L1 | L1
2570 # size() | L1 | L2-LM | L1(*) | L1 (?)
2581 # size() | L1 | L2-LM | L1(*) | L1 (?)
2571 # len(rawtext) | L2 | L2 | L2 | L2
2582 # len(rawtext) | L2 | L2 | L2 | L2
2572 # len(text) | L2 | L2 | L2 | L3
2583 # len(text) | L2 | L2 | L2 | L3
2573 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
2584 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
2574 #
2585 #
2575 # LM: length of metadata, depending on rawtext
2586 # LM: length of metadata, depending on rawtext
2576 # (*): not ideal, see comment in filelog.size
2587 # (*): not ideal, see comment in filelog.size
2577 # (?): could be "- len(meta)" if the resolved content has
2588 # (?): could be "- len(meta)" if the resolved content has
2578 # rename metadata
2589 # rename metadata
2579 #
2590 #
2580 # Checks needed to be done:
2591 # Checks needed to be done:
2581 # 1. length check: L1 == L2, in all cases.
2592 # 1. length check: L1 == L2, in all cases.
2582 # 2. hash check: depending on flag processor, we may need to
2593 # 2. hash check: depending on flag processor, we may need to
2583 # use either "text" (external), or "rawtext" (in revlog).
2594 # use either "text" (external), or "rawtext" (in revlog).
2584
2595
2585 try:
2596 try:
2586 skipflags = state.get('skipflags', 0)
2597 skipflags = state.get('skipflags', 0)
2587 if skipflags:
2598 if skipflags:
2588 skipflags &= self.flags(rev)
2599 skipflags &= self.flags(rev)
2589
2600
2590 if skipflags:
2601 if skipflags:
2591 state['skipread'].add(node)
2602 state['skipread'].add(node)
2592 else:
2603 else:
2593 # Side-effect: read content and verify hash.
2604 # Side-effect: read content and verify hash.
2594 self.revision(node)
2605 self.revision(node)
2595
2606
2596 l1 = self.rawsize(rev)
2607 l1 = self.rawsize(rev)
2597 l2 = len(self.rawdata(node))
2608 l2 = len(self.rawdata(node))
2598
2609
2599 if l1 != l2:
2610 if l1 != l2:
2600 yield revlogproblem(
2611 yield revlogproblem(
2601 error=_('unpacked size is %d, %d expected') % (l2, l1),
2612 error=_('unpacked size is %d, %d expected') % (l2, l1),
2602 node=node)
2613 node=node)
2603
2614
2604 except error.CensoredNodeError:
2615 except error.CensoredNodeError:
2605 if state['erroroncensored']:
2616 if state['erroroncensored']:
2606 yield revlogproblem(error=_('censored file data'),
2617 yield revlogproblem(error=_('censored file data'),
2607 node=node)
2618 node=node)
2608 state['skipread'].add(node)
2619 state['skipread'].add(node)
2609 except Exception as e:
2620 except Exception as e:
2610 yield revlogproblem(
2621 yield revlogproblem(
2611 error=_('unpacking %s: %s') % (short(node),
2622 error=_('unpacking %s: %s') % (short(node),
2612 stringutil.forcebytestr(e)),
2623 stringutil.forcebytestr(e)),
2613 node=node)
2624 node=node)
2614 state['skipread'].add(node)
2625 state['skipread'].add(node)
2615
2626
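# --- editorial sketch, not part of the changeset ---
# Consuming the problems yielded by verifyintegrity(); the `state` keys
# mirror the ones the method reads above, and `rl` is an assumption.
state = {'expectedversion': rl.version & 0xFFFF, 'erroroncensored': True}
for problem in rl.verifyintegrity(state):
    if problem.error:
        print('error: %s' % problem.error)
    elif problem.warning:
        print('warning: %s' % problem.warning)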
2616 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
2627 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
2617 revisionscount=False, trackedsize=False,
2628 revisionscount=False, trackedsize=False,
2618 storedsize=False):
2629 storedsize=False):
2619 d = {}
2630 d = {}
2620
2631
2621 if exclusivefiles:
2632 if exclusivefiles:
2622 d['exclusivefiles'] = [(self.opener, self.indexfile)]
2633 d['exclusivefiles'] = [(self.opener, self.indexfile)]
2623 if not self._inline:
2634 if not self._inline:
2624 d['exclusivefiles'].append((self.opener, self.datafile))
2635 d['exclusivefiles'].append((self.opener, self.datafile))
2625
2636
2626 if sharedfiles:
2637 if sharedfiles:
2627 d['sharedfiles'] = []
2638 d['sharedfiles'] = []
2628
2639
2629 if revisionscount:
2640 if revisionscount:
2630 d['revisionscount'] = len(self)
2641 d['revisionscount'] = len(self)
2631
2642
2632 if trackedsize:
2643 if trackedsize:
2633 d['trackedsize'] = sum(map(self.rawsize, iter(self)))
2644 d['trackedsize'] = sum(map(self.rawsize, iter(self)))
2634
2645
2635 if storedsize:
2646 if storedsize:
2636 d['storedsize'] = sum(self.opener.stat(path).st_size
2647 d['storedsize'] = sum(self.opener.stat(path).st_size
2637 for path in self.files())
2648 for path in self.files())
2638
2649
2639 return d
2650 return d
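# --- editorial sketch, not part of the changeset ---
# Querying the optional statistics exposed by storageinfo(); `rl` is an
# assumption of this illustration.
info = rl.storageinfo(revisionscount=True, trackedsize=True, storedsize=True)
print('%d revisions, %d bytes tracked, %d bytes stored'
      % (info['revisionscount'], info['trackedsize'], info['storedsize']))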
@@ -1,206 +1,195 b''
1 # flagutil.py - code to deal with revlog flags and their processors
1 # flagutil.py - code to deal with revlog flags and their processors
2 #
2 #
3 # Copyright 2016 Remi Chaintron <remi@fb.com>
3 # Copyright 2016 Remi Chaintron <remi@fb.com>
4 # Copyright 2016-2019 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Copyright 2016-2019 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 from ..i18n import _
11 from ..i18n import _
12
12
13 from .constants import (
13 from .constants import (
14 REVIDX_DEFAULT_FLAGS,
14 REVIDX_DEFAULT_FLAGS,
15 REVIDX_ELLIPSIS,
15 REVIDX_ELLIPSIS,
16 REVIDX_EXTSTORED,
16 REVIDX_EXTSTORED,
17 REVIDX_FLAGS_ORDER,
17 REVIDX_FLAGS_ORDER,
18 REVIDX_ISCENSORED,
18 REVIDX_ISCENSORED,
19 REVIDX_RAWTEXT_CHANGING_FLAGS,
19 REVIDX_RAWTEXT_CHANGING_FLAGS,
20 )
20 )
21
21
22 from .. import (
22 from .. import (
23 error,
23 error,
24 util
24 util
25 )
25 )
26
26
27 # bare references to all the names to prevent pyflakes unused-import warnings
27 # bare references to all the names to prevent pyflakes unused-import warnings
28 # We need these names available in the module for extensions.
28 # We need these names available in the module for extensions.
29 REVIDX_ISCENSORED
29 REVIDX_ISCENSORED
30 REVIDX_ELLIPSIS
30 REVIDX_ELLIPSIS
31 REVIDX_EXTSTORED
31 REVIDX_EXTSTORED
32 REVIDX_DEFAULT_FLAGS
32 REVIDX_DEFAULT_FLAGS
33 REVIDX_FLAGS_ORDER
33 REVIDX_FLAGS_ORDER
34 REVIDX_RAWTEXT_CHANGING_FLAGS
34 REVIDX_RAWTEXT_CHANGING_FLAGS
35
35
36 REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
36 REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
37
37
38 # Store flag processors (cf. 'addflagprocessor()' to register)
38 # Store flag processors (cf. 'addflagprocessor()' to register)
39 flagprocessors = {
39 flagprocessors = {
40 REVIDX_ISCENSORED: None,
40 REVIDX_ISCENSORED: None,
41 }
41 }
42
42
43 def addflagprocessor(flag, processor):
43 def addflagprocessor(flag, processor):
44 """Register a flag processor on a revision data flag.
44 """Register a flag processor on a revision data flag.
45
45
46 Invariant:
46 Invariant:
47 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
47 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
48 and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
48 and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
49 - Only one flag processor can be registered on a specific flag.
49 - Only one flag processor can be registered on a specific flag.
50 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
50 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
51 following signatures:
51 following signatures:
52 - (read) f(self, rawtext) -> text, bool
52 - (read) f(self, rawtext) -> text, bool
53 - (write) f(self, text) -> rawtext, bool
53 - (write) f(self, text) -> rawtext, bool
54 - (raw) f(self, rawtext) -> bool
54 - (raw) f(self, rawtext) -> bool
55 "text" is presented to the user. "rawtext" is stored in revlog data, not
55 "text" is presented to the user. "rawtext" is stored in revlog data, not
56 directly visible to the user.
56 directly visible to the user.
57 The boolean returned by these transforms is used to determine whether
57 The boolean returned by these transforms is used to determine whether
58 the returned text can be used for hash integrity checking. For example,
58 the returned text can be used for hash integrity checking. For example,
59 if "write" returns False, then "text" is used to generate hash. If
59 if "write" returns False, then "text" is used to generate hash. If
60 "write" returns True, that basically means "rawtext" returned by "write"
60 "write" returns True, that basically means "rawtext" returned by "write"
61 should be used to generate hash. Usually, "write" and "read" return
61 should be used to generate hash. Usually, "write" and "read" return
62 different booleans. And "raw" returns a same boolean as "write".
62 different booleans. And "raw" returns a same boolean as "write".
63
63
64 Note: The 'raw' transform is used for changegroup generation and in some
64 Note: The 'raw' transform is used for changegroup generation and in some
65 debug commands. In this case the transform only indicates whether the
65 debug commands. In this case the transform only indicates whether the
66 contents can be used for hash integrity checks.
66 contents can be used for hash integrity checks.
67 """
67 """
68 insertflagprocessor(flag, processor, flagprocessors)
68 insertflagprocessor(flag, processor, flagprocessors)
69
69
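# --- editorial sketch, not part of the changeset ---
# How an extension might register a (read, write, raw) processor tuple for a
# flag it owns, per the invariants documented above. The transforms are
# illustrative no-ops and REVIDX_EXTSTORED is used purely as an example of an
# already-declared flag. Note that with this changeset the read transform is
# expected to also return a sidedata mapping and the write transform receives
# one (see _processflagsfunc below).
def _readnoop(rl, rawtext):
    return rawtext, True, {}

def _writenoop(rl, text, sidedata):
    return text, True

def _rawnoop(rl, rawtext):
    return True

addflagprocessor(REVIDX_EXTSTORED, (_readnoop, _writenoop, _rawnoop))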
70 def insertflagprocessor(flag, processor, flagprocessors):
70 def insertflagprocessor(flag, processor, flagprocessors):
71 if not flag & REVIDX_KNOWN_FLAGS:
71 if not flag & REVIDX_KNOWN_FLAGS:
72 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
72 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
73 raise error.ProgrammingError(msg)
73 raise error.ProgrammingError(msg)
74 if flag not in REVIDX_FLAGS_ORDER:
74 if flag not in REVIDX_FLAGS_ORDER:
75 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
75 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
76 raise error.ProgrammingError(msg)
76 raise error.ProgrammingError(msg)
77 if flag in flagprocessors:
77 if flag in flagprocessors:
78 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
78 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
79 raise error.Abort(msg)
79 raise error.Abort(msg)
80 flagprocessors[flag] = processor
80 flagprocessors[flag] = processor
81
81
82 class flagprocessorsmixin(object):
82 class flagprocessorsmixin(object):
83 """basic mixin to support revlog flag processing
83 """basic mixin to support revlog flag processing
84
84
85 Make sure the `_flagprocessors` attribute is set at ``__init__`` time.
85 Make sure the `_flagprocessors` attribute is set at ``__init__`` time.
86
86
87 See the documentation of the ``_processflags`` method for details.
87 See the documentation of the ``_processflags`` method for details.
88 """
88 """
89
89
90 _flagserrorclass = error.RevlogError
90 _flagserrorclass = error.RevlogError
91
91
92 def _processflags(self, text, flags, operation, raw=False):
93 """deprecated entry point to access flag processors"""
94 msg = ('_processflag(...) use the specialized variant')
95 util.nouideprecwarn(msg, '5.2', stacklevel=2)
96 if raw:
97 return text, processflagsraw(self, text, flags)
98 elif operation == 'read':
99 return processflagsread(self, text, flags)
100 else: # write operation
101 return processflagswrite(self, text, flags)
102
103 def processflagswrite(revlog, text, flags, sidedata):
92 def processflagswrite(revlog, text, flags, sidedata):
104 """Inspect revision data flags and applies write transformations defined
93 """Inspect revision data flags and applies write transformations defined
105 by registered flag processors.
94 by registered flag processors.
106
95
107 ``text`` - the revision data to process
96 ``text`` - the revision data to process
108 ``flags`` - the revision flags
97 ``flags`` - the revision flags
109
98
110 This method processes the flags in the order (or reverse order if
99 This method processes the flags in the order (or reverse order if
111 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
100 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
112 flag processors registered for present flags. The order of flags defined
101 flag processors registered for present flags. The order of flags defined
113 in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.
102 in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.
114
103
115 Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
104 Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
116 processed text and ``validatehash`` is a bool indicating whether the
105 processed text and ``validatehash`` is a bool indicating whether the
117 returned text should be checked for hash integrity.
106 returned text should be checked for hash integrity.
118 """
107 """
119 return _processflagsfunc(revlog, text, flags, 'write',
108 return _processflagsfunc(revlog, text, flags, 'write',
120 sidedata=sidedata)[:2]
109 sidedata=sidedata)[:2]
121
110
122 def processflagsread(revlog, text, flags):
111 def processflagsread(revlog, text, flags):
123 """Inspect revision data flags and applies read transformations defined
112 """Inspect revision data flags and applies read transformations defined
124 by registered flag processors.
113 by registered flag processors.
125
114
126 ``text`` - the revision data to process
115 ``text`` - the revision data to process
127 ``flags`` - the revision flags
116 ``flags`` - the revision flags
128 ``raw`` - an optional argument describing if the raw transform should be
117 ``raw`` - an optional argument describing if the raw transform should be
129 applied.
118 applied.
130
119
131 This method processes the flags in the order (or reverse order if
120 This method processes the flags in the order (or reverse order if
132 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
121 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
133 flag processors registered for present flags. The order of flags defined
122 flag processors registered for present flags. The order of flags defined
134 in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.
123 in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.
135
124
136 Returns a 3-tuple of ``(text, validatehash, sidedata)`` where ``text`` is the
125 Returns a 3-tuple of ``(text, validatehash, sidedata)`` where ``text`` is the
137 processed text, ``validatehash`` is a bool indicating whether the returned
126 processed text, ``validatehash`` is a bool indicating whether the returned
138 text should be checked for hash integrity, and ``sidedata`` is the side data returned by the flag processors.
127 text should be checked for hash integrity, and ``sidedata`` is the side data returned by the flag processors.
139 """
128 """
140 return _processflagsfunc(revlog, text, flags, 'read')
129 return _processflagsfunc(revlog, text, flags, 'read')
141
130
142 def processflagsraw(revlog, text, flags):
131 def processflagsraw(revlog, text, flags):
143 """Inspect revision data flags to check is the content hash should be
132 """Inspect revision data flags to check is the content hash should be
144 validated.
133 validated.
145
134
146 ``text`` - the revision data to process
135 ``text`` - the revision data to process
147 ``flags`` - the revision flags
136 ``flags`` - the revision flags
148
137
149 This method processes the flags in the order (or reverse order if
138 This method processes the flags in the order (or reverse order if
150 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
139 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
151 flag processors registered for present flags. The order of flags defined
140 flag processors registered for present flags. The order of flags defined
152 in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.
141 in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.
153
142
154 Returns a bool indicating whether the raw text should be checked for hash
143 Returns a bool indicating whether the raw text should be checked for hash
155 integrity. Unlike the read and write variants, no transformed text is
144 integrity. Unlike the read and write variants, no transformed text is
156 returned.
145 returned.
157 """
146 """
158 return _processflagsfunc(revlog, text, flags, 'raw')[1]
147 return _processflagsfunc(revlog, text, flags, 'raw')[1]
159
148
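# --- editorial sketch, not part of the changeset ---
# Return shapes of the three helpers above, as implemented by
# _processflagsfunc() below; `rl`, `rawtext` and `flags` are assumptions of
# this illustration.
text, validatehash, sidedata = processflagsread(rl, rawtext, flags)
rawtext2, validatehash2 = processflagswrite(rl, text, flags, sidedata)
hashable = processflagsraw(rl, rawtext2, flags)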
160 def _processflagsfunc(revlog, text, flags, operation, sidedata=None):
149 def _processflagsfunc(revlog, text, flags, operation, sidedata=None):
161 """internal function to process flag on a revlog
150 """internal function to process flag on a revlog
162
151
163 This function is private to this module; code should never need to call it
152 This function is private to this module; code should never need to call it
164 directly."""
153 directly."""
165 # fast path: no flag processors will run
154 # fast path: no flag processors will run
166 if flags == 0:
155 if flags == 0:
167 return text, True, {}
156 return text, True, {}
168 if operation not in ('read', 'write', 'raw'):
157 if operation not in ('read', 'write', 'raw'):
169 raise error.ProgrammingError(_("invalid '%s' operation") %
158 raise error.ProgrammingError(_("invalid '%s' operation") %
170 operation)
159 operation)
171 # Check all flags are known.
160 # Check all flags are known.
172 if flags & ~REVIDX_KNOWN_FLAGS:
161 if flags & ~REVIDX_KNOWN_FLAGS:
173 raise revlog._flagserrorclass(_("incompatible revision flag '%#x'") %
162 raise revlog._flagserrorclass(_("incompatible revision flag '%#x'") %
174 (flags & ~REVIDX_KNOWN_FLAGS))
163 (flags & ~REVIDX_KNOWN_FLAGS))
175 validatehash = True
164 validatehash = True
176 # Depending on the operation (read or write), the order might be
165 # Depending on the operation (read or write), the order might be
177 # reversed due to non-commutative transforms.
166 # reversed due to non-commutative transforms.
178 orderedflags = REVIDX_FLAGS_ORDER
167 orderedflags = REVIDX_FLAGS_ORDER
179 if operation == 'write':
168 if operation == 'write':
180 orderedflags = reversed(orderedflags)
169 orderedflags = reversed(orderedflags)
181
170
182 outsidedata = {}
171 outsidedata = {}
183 for flag in orderedflags:
172 for flag in orderedflags:
184 # If a flagprocessor has been registered for a known flag, apply the
173 # If a flagprocessor has been registered for a known flag, apply the
185 # related operation transform and update result tuple.
174 # related operation transform and update result tuple.
186 if flag & flags:
175 if flag & flags:
187 vhash = True
176 vhash = True
188
177
189 if flag not in revlog._flagprocessors:
178 if flag not in revlog._flagprocessors:
190 message = _("missing processor for flag '%#x'") % (flag)
179 message = _("missing processor for flag '%#x'") % (flag)
191 raise revlog._flagserrorclass(message)
180 raise revlog._flagserrorclass(message)
192
181
193 processor = revlog._flagprocessors[flag]
182 processor = revlog._flagprocessors[flag]
194 if processor is not None:
183 if processor is not None:
195 readtransform, writetransform, rawtransform = processor
184 readtransform, writetransform, rawtransform = processor
196
185
197 if operation == 'raw':
186 if operation == 'raw':
198 vhash = rawtransform(revlog, text)
187 vhash = rawtransform(revlog, text)
199 elif operation == 'read':
188 elif operation == 'read':
200 text, vhash, s = readtransform(revlog, text)
189 text, vhash, s = readtransform(revlog, text)
201 outsidedata.update(s)
190 outsidedata.update(s)
202 else: # write operation
191 else: # write operation
203 text, vhash = writetransform(revlog, text, sidedata)
192 text, vhash = writetransform(revlog, text, sidedata)
204 validatehash = validatehash and vhash
193 validatehash = validatehash and vhash
205
194
206 return text, validatehash, outsidedata
195 return text, validatehash, outsidedata