# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""

from __future__ import absolute_import

import binascii
import collections
import errno
import hashlib
import os
import struct
import zlib

# import stuff from node for others to import from revlog
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    wdirhex,
    wdirid,
    wdirrev,
)
from .i18n import _
from . import (
    ancestor,
    error,
    mdiff,
    policy,
    pycompat,
    templatefilters,
    util,
)

parsers = policy.importmod(r'parsers')

# Aliased for performance.
_zlibdecompress = zlib.decompress

# revlog header flags
REVLOGV0 = 0
REVLOGV1 = 1
# Dummy value until file format is finalized.
# Reminder: change the bounds check in revlog.__init__ when this is changed.
REVLOGV2 = 0xDEAD
FLAG_INLINE_DATA = (1 << 16)
FLAG_GENERALDELTA = (1 << 17)
REVLOG_DEFAULT_FLAGS = FLAG_INLINE_DATA
REVLOG_DEFAULT_FORMAT = REVLOGV1
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
REVLOGV1_FLAGS = FLAG_INLINE_DATA | FLAG_GENERALDELTA
REVLOGV2_FLAGS = REVLOGV1_FLAGS
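
# Illustrative sketch (not from the original file): the 32-bit version field
# at the start of a revlog index packs the format number into the low 16 bits
# and the feature flags into the high 16 bits, so a header value decomposes
# the same way revlog.__init__ below parses it:
#
#     v = REVLOG_DEFAULT_VERSION   # REVLOGV1 | FLAG_INLINE_DATA
#     fmt = v & 0xFFFF             # -> 1 (REVLOGV1)
#     flags = v & ~0xFFFF          # -> FLAG_INLINE_DATA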

# revlog index flags
REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
REVIDX_ELLIPSIS = (1 << 14) # revision hash does not match data (narrowhg)
REVIDX_EXTSTORED = (1 << 13) # revision data is stored externally
REVIDX_DEFAULT_FLAGS = 0
# stable order in which flags need to be processed and their processors applied
REVIDX_FLAGS_ORDER = [
    REVIDX_ISCENSORED,
    REVIDX_ELLIPSIS,
    REVIDX_EXTSTORED,
]
REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

RevlogError = error.RevlogError
LookupError = error.LookupError
CensoredNodeError = error.CensoredNodeError
ProgrammingError = error.ProgrammingError

# Store flag processors (cf. 'addflagprocessor()' to register)
_flagprocessors = {
    REVIDX_ISCENSORED: None,
}

def addflagprocessor(flag, processor):
    """Register a flag processor on a revision data flag.

    Invariant:
    - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER.
    - Only one flag processor can be registered on a specific flag.
    - flagprocessors must be 3-tuples of functions (read, write, raw) with the
      following signatures:
          - (read)  f(self, rawtext) -> text, bool
          - (write) f(self, text) -> rawtext, bool
          - (raw)   f(self, rawtext) -> bool
      "text" is presented to the user. "rawtext" is stored in revlog data, not
      directly visible to the user.
      The boolean returned by these transforms is used to determine whether
      the returned text can be used for hash integrity checking. For example,
      if "write" returns False, then "text" is used to generate hash. If
      "write" returns True, that basically means "rawtext" returned by "write"
      should be used to generate hash. Usually, "write" and "read" return
      different booleans. And "raw" returns the same boolean as "write".

    Note: The 'raw' transform is used for changegroup generation and in some
    debug commands. In this case the transform only indicates whether the
    contents can be used for hash integrity checks.
    """
    if not flag & REVIDX_KNOWN_FLAGS:
        msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
        raise ProgrammingError(msg)
    if flag not in REVIDX_FLAGS_ORDER:
        msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
        raise ProgrammingError(msg)
    if flag in _flagprocessors:
        msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
        raise error.Abort(msg)
    _flagprocessors[flag] = processor
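
# Illustrative sketch (not from the original file) of registering a
# processor; the helper names below are hypothetical. Per the invariants
# above, 'processor' is a 3-tuple of (read, write, raw) functions:
#
#     def _readext(self, rawtext):
#         return rawtext, True    # returned text is safe for hash checks
#     def _writeext(self, text):
#         return text, True
#     def _rawext(self, rawtext):
#         return True
#     addflagprocessor(REVIDX_EXTSTORED, (_readext, _writeext, _rawext))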

def getoffset(q):
    return int(q >> 16)

def gettype(q):
    return int(q & 0xFFFF)

def offset_type(offset, type):
    if (type & ~REVIDX_KNOWN_FLAGS) != 0:
        raise ValueError('unknown revlog index flags')
    return int(int(offset) << 16 | type)
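
# Illustrative sketch (not from the original file): the first field of an
# index entry multiplexes the data offset and the per-revision flags, and
# getoffset()/gettype() invert offset_type():
#
#     packed = offset_type(4096, REVIDX_ISCENSORED)
#     getoffset(packed)   # -> 4096
#     gettype(packed)     # -> REVIDX_ISCENSORED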

_nullhash = hashlib.sha1(nullid)

def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # As of now, if one of the parent nodes is null, p2 is null
    if p2 == nullid:
        # deep copy of a hash is faster than creating one
        s = _nullhash.copy()
        s.update(p1)
    else:
        # none of the parent nodes are nullid
        if p1 < p2:
            a = p1
            b = p2
        else:
            a = p2
            b = p1
        s = hashlib.sha1(a)
        s.update(b)
    s.update(text)
    return s.digest()
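
# Illustrative sketch (not from the original file): for any parents p1 and
# p2, the nodeid computed by hash() equals the SHA-1 of the sorted parent
# nodeids followed by the revision text (the p2 == nullid branch above is
# just a fast path, since nullid sorts first):
#
#     expected = hashlib.sha1(min(p1, p2) + max(p1, p2) + text).digest()
#     assert hash(text, p1, p2) == expected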

# index v0:
#  4 bytes: offset
#  4 bytes: compressed length
#  4 bytes: base rev
#  4 bytes: link rev
# 20 bytes: parent 1 nodeid
# 20 bytes: parent 2 nodeid
# 20 bytes: nodeid
indexformatv0 = struct.Struct(">4l20s20s20s")
indexformatv0_pack = indexformatv0.pack
indexformatv0_unpack = indexformatv0.unpack
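
# Illustrative sketch (not from the original file): a v0 index entry is a
# fixed 76-byte record (four 4-byte big-endian ints plus three 20-byte
# nodeids), which is why parseindex below can walk the data in fixed steps:
#
#     indexformatv0.size   # -> 4 * 4 + 3 * 20 == 76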

class revlogoldio(object):
    def __init__(self):
        self.size = indexformatv0.size

    def parseindex(self, data, inline):
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = indexformatv0_unpack(cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        # add the magic null revision at -1
        index.append((0, 0, 0, -1, -1, -1, -1, nullid))

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        if gettype(entry[0]):
            raise RevlogError(_('index entry flags need revlog version 1'))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return indexformatv0_pack(*e2)

# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = struct.Struct(">Qiiiiii20s12x")
indexformatng_pack = indexformatng.pack
versionformat = struct.Struct(">I")
versionformat_pack = versionformat.pack
versionformat_unpack = versionformat.unpack
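
# Illustrative sketch (not from the original file): a v1 ("ng") index entry
# is a fixed 64-byte record; the trailing "12x" pads the 20-byte nodeid
# field to 32 bytes, and for revision 0 packentry() below overwrites the
# first 4 bytes of the offset/flags field with the version header:
#
#     indexformatng.size   # -> 8 + 6 * 4 + 20 + 12 == 64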

# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7fffffff

class revlogio(object):
    def __init__(self):
        self.size = indexformatng.size

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, getattr(index, 'nodemap', None), cache

    def packentry(self, entry, node, version, rev):
        p = indexformatng_pack(*entry)
        if rev == 0:
            p = versionformat_pack(version) + p[4:]
        return p

class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.

    If checkambig, indexfile is opened with checkambig=True at
    writing, to avoid file stat ambiguity.
    """
    def __init__(self, opener, indexfile, datafile=None, checkambig=False):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = datafile or (indexfile[:-2] + ".d")
        self.opener = opener
        # When True, indexfile is opened with checkambig=True at writing, to
        # avoid file stat ambiguity.
        self._checkambig = checkambig
        # 3-tuple of (node, rev, text) for a raw revision.
        self._cache = None
        # Maps rev to chain base rev.
        self._chainbasecache = util.lrucachedict(100)
        # 2-tuple of (offset, data) of raw data from the revlog at an offset.
        self._chunkcache = (0, '')
        # How much data to read and cache into the raw revlog data cache.
        self._chunkcachesize = 65536
        self._maxchainlen = None
        self._aggressivemergedeltas = False
        self.index = []
        # Mapping of partial identifiers to full nodes.
        self._pcache = {}
        # Mapping of revision integer to full node.
        self._nodecache = {nullid: nullrev}
        self._nodepos = None
        self._compengine = 'zlib'
        self._maxdeltachainspan = -1

        v = REVLOG_DEFAULT_VERSION
        opts = getattr(opener, 'options', None)
        if opts is not None:
            if 'revlogv2' in opts:
                # version 2 revlogs always use generaldelta.
                v = REVLOGV2 | FLAG_GENERALDELTA | FLAG_INLINE_DATA
            elif 'revlogv1' in opts:
                if 'generaldelta' in opts:
                    v |= FLAG_GENERALDELTA
            else:
                v = 0
            if 'chunkcachesize' in opts:
                self._chunkcachesize = opts['chunkcachesize']
            if 'maxchainlen' in opts:
                self._maxchainlen = opts['maxchainlen']
            if 'aggressivemergedeltas' in opts:
                self._aggressivemergedeltas = opts['aggressivemergedeltas']
            self._lazydeltabase = bool(opts.get('lazydeltabase', False))
            if 'compengine' in opts:
                self._compengine = opts['compengine']
            if 'maxdeltachainspan' in opts:
                self._maxdeltachainspan = opts['maxdeltachainspan']

        if self._chunkcachesize <= 0:
            raise RevlogError(_('revlog chunk cache size %r is not greater '
                                'than 0') % self._chunkcachesize)
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise RevlogError(_('revlog chunk cache size %r is not a power '
                                'of 2') % self._chunkcachesize)

        indexdata = ''
        self._initempty = True
        try:
            f = self.opener(self.indexfile)
            indexdata = f.read()
            f.close()
            if len(indexdata) > 0:
                v = versionformat_unpack(indexdata[:4])[0]
                self._initempty = False
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.version = v
        self._inline = v & FLAG_INLINE_DATA
        self._generaldelta = v & FLAG_GENERALDELTA
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0:
            if flags:
                raise RevlogError(_('unknown flags (%#04x) in version %d '
                                    'revlog %s') %
                                  (flags >> 16, fmt, self.indexfile))
        elif fmt == REVLOGV1:
            if flags & ~REVLOGV1_FLAGS:
                raise RevlogError(_('unknown flags (%#04x) in version %d '
                                    'revlog %s') %
                                  (flags >> 16, fmt, self.indexfile))
        elif fmt == REVLOGV2:
            if flags & ~REVLOGV2_FLAGS:
                raise RevlogError(_('unknown flags (%#04x) in version %d '
                                    'revlog %s') %
                                  (flags >> 16, fmt, self.indexfile))
        else:
            raise RevlogError(_('unknown version (%d) in revlog %s') %
                              (fmt, self.indexfile))

        self.storedeltachains = True

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        try:
            d = self._io.parseindex(indexdata, self._inline)
        except (ValueError, IndexError):
            raise RevlogError(_("index %s is corrupted") % (self.indexfile))
        self.index, nodemap, self._chunkcache = d
        if nodemap is not None:
            self.nodemap = self._nodecache = nodemap
        if not self._chunkcache:
            self._chunkclear()
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = {}
        # revlog header -> revlog compressor
        self._decompressors = {}

    @util.propertycache
    def _compressor(self):
        return util.compengines[self._compengine].revlogcompressor()

    def tip(self):
        return self.node(len(self.index) - 2)
    def __contains__(self, rev):
        return 0 <= rev < len(self)
    def __len__(self):
        return len(self.index) - 1
    def __iter__(self):
        return iter(xrange(len(self)))
    def revs(self, start=0, stop=None):
        """iterate over all revs in this revlog (from start to stop)"""
        step = 1
        if stop is not None:
            if start > stop:
                step = -1
            stop += step
        else:
            stop = len(self)
        return xrange(start, stop, step)
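
    # Illustrative sketch (not from the original file): revs() yields an
    # inclusive range and iterates backwards when start > stop. Assuming a
    # hypothetical revlog instance rl with at least six revisions:
    #
    #     list(rl.revs(start=2, stop=5))   # -> [2, 3, 4, 5]
    #     list(rl.revs(start=5, stop=2))   # -> [5, 4, 3, 2]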

    @util.propertycache
    def nodemap(self):
        self.rev(self.node(0))
        return self._nodecache

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def clearcaches(self):
        self._cache = None
        self._chainbasecache.clear()
        self._chunkcache = (0, '')
        self._pcache = {}

        try:
            self._nodecache.clearcaches()
        except AttributeError:
            self._nodecache = {nullid: nullrev}
            self._nodepos = None

    def rev(self, node):
        try:
            return self._nodecache[node]
        except TypeError:
            raise
        except RevlogError:
            # parsers.c radix tree lookup failed
            if node == wdirid:
                raise error.WdirUnsupported
            raise LookupError(node, self.indexfile, _('no node'))
        except KeyError:
            # pure python cache lookup failed
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                p = len(i) - 2
            for r in xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    self._nodepos = r - 1
                    return r
            if node == wdirid:
                raise error.WdirUnsupported
            raise LookupError(node, self.indexfile, _('no node'))

    # Accessors for index entries.

    # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
    # are flags.
    def start(self, rev):
        return int(self.index[rev][0] >> 16)

    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF

    def length(self, rev):
        return self.index[rev][1]

    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.revision(rev, raw=True)
        return len(t)

    def size(self, rev):
        """length of non-raw text (processed by a "read" flag processor)"""
        # fast path: if no "read" flag processor could change the content,
        # size is rawsize. note: ELLIPSIS is known to not change the content.
        flags = self.flags(rev)
        if flags & (REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
            return self.rawsize(rev)

        return len(self.revision(rev, raw=False))

    def chainbase(self, rev):
        base = self._chainbasecache.get(rev)
        if base is not None:
            return base

        index = self.index
        base = index[rev][3]
        while base != rev:
            rev = base
            base = index[rev][3]

        self._chainbasecache[rev] = base
        return base

    def linkrev(self, rev):
        return self.index[rev][4]

    def parentrevs(self, rev):
        try:
            return self.index[rev][5:7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

    def node(self, rev):
        try:
            return self.index[rev][7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

    # Derived from index values.

    def end(self, rev):
        return self.start(rev) + self.length(rev)

    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline

    def chainlen(self, rev):
        return self._chaininfo(rev)[0]

    def _chaininfo(self, rev):
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r

    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, self._generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta

        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped
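
    # Illustrative sketch (not from the original file): assume rev 2 is
    # stored as a full snapshot (its base field points at itself) and revs
    # 3, 4 and 5 each delta against the preceding revision. Then, for a
    # hypothetical revlog instance rl:
    #
    #     rl._deltachain(5)             # -> ([2, 3, 4, 5], False)
    #     rl._deltachain(5, stoprev=3)  # -> ([4, 5], True)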
613
613
614 def ancestors(self, revs, stoprev=0, inclusive=False):
614 def ancestors(self, revs, stoprev=0, inclusive=False):
615 """Generate the ancestors of 'revs' in reverse topological order.
615 """Generate the ancestors of 'revs' in reverse topological order.
616 Does not generate revs lower than stoprev.
616 Does not generate revs lower than stoprev.
617
617
618 See the documentation for ancestor.lazyancestors for more details."""
618 See the documentation for ancestor.lazyancestors for more details."""
619
619
620 return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
620 return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
621 inclusive=inclusive)
621 inclusive=inclusive)
622
622
623 def descendants(self, revs):
623 def descendants(self, revs):
624 """Generate the descendants of 'revs' in revision order.
624 """Generate the descendants of 'revs' in revision order.
625
625
626 Yield a sequence of revision numbers starting with a child of
626 Yield a sequence of revision numbers starting with a child of
627 some rev in revs, i.e., each revision is *not* considered a
627 some rev in revs, i.e., each revision is *not* considered a
628 descendant of itself. Results are ordered by revision number (a
628 descendant of itself. Results are ordered by revision number (a
629 topological sort)."""
629 topological sort)."""
630 first = min(revs)
630 first = min(revs)
631 if first == nullrev:
631 if first == nullrev:
632 for i in self:
632 for i in self:
633 yield i
633 yield i
634 return
634 return
635
635
636 seen = set(revs)
636 seen = set(revs)
637 for i in self.revs(start=first + 1):
637 for i in self.revs(start=first + 1):
638 for x in self.parentrevs(i):
638 for x in self.parentrevs(i):
639 if x != nullrev and x in seen:
639 if x != nullrev and x in seen:
640 seen.add(i)
640 seen.add(i)
641 yield i
641 yield i
642 break
642 break
643
643
644 def findcommonmissing(self, common=None, heads=None):
644 def findcommonmissing(self, common=None, heads=None):
645 """Return a tuple of the ancestors of common and the ancestors of heads
645 """Return a tuple of the ancestors of common and the ancestors of heads
646 that are not ancestors of common. In revset terminology, we return the
646 that are not ancestors of common. In revset terminology, we return the
647 tuple:
647 tuple:
648
648
649 ::common, (::heads) - (::common)
649 ::common, (::heads) - (::common)
650
650
651 The list is sorted by revision number, meaning it is
651 The list is sorted by revision number, meaning it is
652 topologically sorted.
652 topologically sorted.
653
653
654 'heads' and 'common' are both lists of node IDs. If heads is
654 'heads' and 'common' are both lists of node IDs. If heads is
655 not supplied, uses all of the revlog's heads. If common is not
655 not supplied, uses all of the revlog's heads. If common is not
656 supplied, uses nullid."""
656 supplied, uses nullid."""
657 if common is None:
657 if common is None:
658 common = [nullid]
658 common = [nullid]
659 if heads is None:
659 if heads is None:
660 heads = self.heads()
660 heads = self.heads()
661
661
662 common = [self.rev(n) for n in common]
662 common = [self.rev(n) for n in common]
663 heads = [self.rev(n) for n in heads]
663 heads = [self.rev(n) for n in heads]
664
664
665 # we want the ancestors, but inclusive
665 # we want the ancestors, but inclusive
666 class lazyset(object):
666 class lazyset(object):
667 def __init__(self, lazyvalues):
667 def __init__(self, lazyvalues):
668 self.addedvalues = set()
668 self.addedvalues = set()
669 self.lazyvalues = lazyvalues
669 self.lazyvalues = lazyvalues
670
670
671 def __contains__(self, value):
671 def __contains__(self, value):
672 return value in self.addedvalues or value in self.lazyvalues
672 return value in self.addedvalues or value in self.lazyvalues
673
673
674 def __iter__(self):
674 def __iter__(self):
675 added = self.addedvalues
675 added = self.addedvalues
676 for r in added:
676 for r in added:
677 yield r
677 yield r
678 for r in self.lazyvalues:
678 for r in self.lazyvalues:
679 if not r in added:
679 if not r in added:
680 yield r
680 yield r
681
681
682 def add(self, value):
682 def add(self, value):
683 self.addedvalues.add(value)
683 self.addedvalues.add(value)
684
684
685 def update(self, values):
685 def update(self, values):
686 self.addedvalues.update(values)
686 self.addedvalues.update(values)
687
687
688 has = lazyset(self.ancestors(common))
688 has = lazyset(self.ancestors(common))
689 has.add(nullrev)
689 has.add(nullrev)
690 has.update(common)
690 has.update(common)
691
691
692 # take all ancestors from heads that aren't in has
692 # take all ancestors from heads that aren't in has
693 missing = set()
693 missing = set()
694 visit = collections.deque(r for r in heads if r not in has)
694 visit = collections.deque(r for r in heads if r not in has)
695 while visit:
695 while visit:
696 r = visit.popleft()
696 r = visit.popleft()
697 if r in missing:
697 if r in missing:
698 continue
698 continue
699 else:
699 else:
700 missing.add(r)
700 missing.add(r)
701 for p in self.parentrevs(r):
701 for p in self.parentrevs(r):
702 if p not in has:
702 if p not in has:
703 visit.append(p)
703 visit.append(p)
704 missing = list(missing)
704 missing = list(missing)
705 missing.sort()
705 missing.sort()
706 return has, [self.node(miss) for miss in missing]
706 return has, [self.node(miss) for miss in missing]
707
707
708 def incrementalmissingrevs(self, common=None):
708 def incrementalmissingrevs(self, common=None):
709 """Return an object that can be used to incrementally compute the
709 """Return an object that can be used to incrementally compute the
710 revision numbers of the ancestors of arbitrary sets that are not
710 revision numbers of the ancestors of arbitrary sets that are not
711 ancestors of common. This is an ancestor.incrementalmissingancestors
711 ancestors of common. This is an ancestor.incrementalmissingancestors
712 object.
712 object.
713
713
714 'common' is a list of revision numbers. If common is not supplied, uses
714 'common' is a list of revision numbers. If common is not supplied, uses
715 nullrev.
715 nullrev.
716 """
716 """
717 if common is None:
717 if common is None:
718 common = [nullrev]
718 common = [nullrev]
719
719
720 return ancestor.incrementalmissingancestors(self.parentrevs, common)
720 return ancestor.incrementalmissingancestors(self.parentrevs, common)
721
721
722 def findmissingrevs(self, common=None, heads=None):
722 def findmissingrevs(self, common=None, heads=None):
723 """Return the revision numbers of the ancestors of heads that
723 """Return the revision numbers of the ancestors of heads that
724 are not ancestors of common.
724 are not ancestors of common.
725
725
726 More specifically, return a list of revision numbers corresponding to
726 More specifically, return a list of revision numbers corresponding to
727 nodes N such that every N satisfies the following constraints:
727 nodes N such that every N satisfies the following constraints:
728
728
729 1. N is an ancestor of some node in 'heads'
729 1. N is an ancestor of some node in 'heads'
730 2. N is not an ancestor of any node in 'common'
730 2. N is not an ancestor of any node in 'common'
731
731
732 The list is sorted by revision number, meaning it is
732 The list is sorted by revision number, meaning it is
733 topologically sorted.
733 topologically sorted.
734
734
735 'heads' and 'common' are both lists of revision numbers. If heads is
735 'heads' and 'common' are both lists of revision numbers. If heads is
736 not supplied, uses all of the revlog's heads. If common is not
736 not supplied, uses all of the revlog's heads. If common is not
737 supplied, uses nullid."""
737 supplied, uses nullid."""
738 if common is None:
738 if common is None:
739 common = [nullrev]
739 common = [nullrev]
740 if heads is None:
740 if heads is None:
741 heads = self.headrevs()
741 heads = self.headrevs()
742
742
743 inc = self.incrementalmissingrevs(common=common)
743 inc = self.incrementalmissingrevs(common=common)
744 return inc.missingancestors(heads)
744 return inc.missingancestors(heads)
745
745
746 def findmissing(self, common=None, heads=None):
746 def findmissing(self, common=None, heads=None):
747 """Return the ancestors of heads that are not ancestors of common.
747 """Return the ancestors of heads that are not ancestors of common.
748
748
749 More specifically, return a list of nodes N such that every N
749 More specifically, return a list of nodes N such that every N
750 satisfies the following constraints:
750 satisfies the following constraints:
751
751
752 1. N is an ancestor of some node in 'heads'
752 1. N is an ancestor of some node in 'heads'
753 2. N is not an ancestor of any node in 'common'
753 2. N is not an ancestor of any node in 'common'
754
754
755 The list is sorted by revision number, meaning it is
755 The list is sorted by revision number, meaning it is
756 topologically sorted.
756 topologically sorted.
757
757
758 'heads' and 'common' are both lists of node IDs. If heads is
758 'heads' and 'common' are both lists of node IDs. If heads is
759 not supplied, uses all of the revlog's heads. If common is not
759 not supplied, uses all of the revlog's heads. If common is not
760 supplied, uses nullid."""
760 supplied, uses nullid."""
761 if common is None:
761 if common is None:
762 common = [nullid]
762 common = [nullid]
763 if heads is None:
763 if heads is None:
764 heads = self.heads()
764 heads = self.heads()
765
765
766 common = [self.rev(n) for n in common]
766 common = [self.rev(n) for n in common]
767 heads = [self.rev(n) for n in heads]
767 heads = [self.rev(n) for n in heads]
768
768
769 inc = self.incrementalmissingrevs(common=common)
769 inc = self.incrementalmissingrevs(common=common)
770 return [self.node(r) for r in inc.missingancestors(heads)]
770 return [self.node(r) for r in inc.missingancestors(heads)]
771
771
772 def nodesbetween(self, roots=None, heads=None):
772 def nodesbetween(self, roots=None, heads=None):
773 """Return a topological path from 'roots' to 'heads'.
773 """Return a topological path from 'roots' to 'heads'.
774
774
775 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
775 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
776 topologically sorted list of all nodes N that satisfy both of
776 topologically sorted list of all nodes N that satisfy both of
777 these constraints:
777 these constraints:
778
778
779 1. N is a descendant of some node in 'roots'
779 1. N is a descendant of some node in 'roots'
780 2. N is an ancestor of some node in 'heads'
780 2. N is an ancestor of some node in 'heads'
781
781
782 Every node is considered to be both a descendant and an ancestor
782 Every node is considered to be both a descendant and an ancestor
783 of itself, so every reachable node in 'roots' and 'heads' will be
783 of itself, so every reachable node in 'roots' and 'heads' will be
784 included in 'nodes'.
784 included in 'nodes'.
785
785
786 'outroots' is the list of reachable nodes in 'roots', i.e., the
786 'outroots' is the list of reachable nodes in 'roots', i.e., the
787 subset of 'roots' that is returned in 'nodes'. Likewise,
787 subset of 'roots' that is returned in 'nodes'. Likewise,
788 'outheads' is the subset of 'heads' that is also in 'nodes'.
788 'outheads' is the subset of 'heads' that is also in 'nodes'.
789
789
790 'roots' and 'heads' are both lists of node IDs. If 'roots' is
790 'roots' and 'heads' are both lists of node IDs. If 'roots' is
791 unspecified, uses nullid as the only root. If 'heads' is
791 unspecified, uses nullid as the only root. If 'heads' is
792 unspecified, uses list of all of the revlog's heads."""
792 unspecified, uses list of all of the revlog's heads."""
793 nonodes = ([], [], [])
793 nonodes = ([], [], [])
794 if roots is not None:
794 if roots is not None:
795 roots = list(roots)
795 roots = list(roots)
796 if not roots:
796 if not roots:
797 return nonodes
797 return nonodes
798 lowestrev = min([self.rev(n) for n in roots])
798 lowestrev = min([self.rev(n) for n in roots])
799 else:
799 else:
800 roots = [nullid] # Everybody's a descendant of nullid
800 roots = [nullid] # Everybody's a descendant of nullid
801 lowestrev = nullrev
801 lowestrev = nullrev
802 if (lowestrev == nullrev) and (heads is None):
802 if (lowestrev == nullrev) and (heads is None):
803 # We want _all_ the nodes!
803 # We want _all_ the nodes!
804 return ([self.node(r) for r in self], [nullid], list(self.heads()))
804 return ([self.node(r) for r in self], [nullid], list(self.heads()))
805 if heads is None:
805 if heads is None:
806 # All nodes are ancestors, so the latest ancestor is the last
806 # All nodes are ancestors, so the latest ancestor is the last
807 # node.
807 # node.
808 highestrev = len(self) - 1
808 highestrev = len(self) - 1
809 # Set ancestors to None to signal that every node is an ancestor.
809 # Set ancestors to None to signal that every node is an ancestor.
810 ancestors = None
810 ancestors = None
811 # Set heads to an empty dictionary for later discovery of heads
811 # Set heads to an empty dictionary for later discovery of heads
812 heads = {}
812 heads = {}
813 else:
813 else:
814 heads = list(heads)
814 heads = list(heads)
815 if not heads:
815 if not heads:
816 return nonodes
816 return nonodes
817 ancestors = set()
817 ancestors = set()
818 # Turn heads into a dictionary so we can remove 'fake' heads.
818 # Turn heads into a dictionary so we can remove 'fake' heads.
819 # Also, later we will be using it to filter out the heads we can't
819 # Also, later we will be using it to filter out the heads we can't
820 # find from roots.
820 # find from roots.
821 heads = dict.fromkeys(heads, False)
821 heads = dict.fromkeys(heads, False)
822 # Start at the top and keep marking parents until we're done.
822 # Start at the top and keep marking parents until we're done.
823 nodestotag = set(heads)
823 nodestotag = set(heads)
824 # Remember where the top was so we can use it as a limit later.
824 # Remember where the top was so we can use it as a limit later.
825 highestrev = max([self.rev(n) for n in nodestotag])
825 highestrev = max([self.rev(n) for n in nodestotag])
826 while nodestotag:
826 while nodestotag:
827 # grab a node to tag
827 # grab a node to tag
828 n = nodestotag.pop()
828 n = nodestotag.pop()
829 # Never tag nullid
829 # Never tag nullid
830 if n == nullid:
830 if n == nullid:
831 continue
831 continue
832 # A node's revision number represents its place in a
832 # A node's revision number represents its place in a
833 # topologically sorted list of nodes.
833 # topologically sorted list of nodes.
834 r = self.rev(n)
834 r = self.rev(n)
835 if r >= lowestrev:
835 if r >= lowestrev:
836 if n not in ancestors:
836 if n not in ancestors:
837 # If we are possibly a descendant of one of the roots
837 # If we are possibly a descendant of one of the roots
838 # and we haven't already been marked as an ancestor
838 # and we haven't already been marked as an ancestor
839 ancestors.add(n) # Mark as ancestor
839 ancestors.add(n) # Mark as ancestor
840 # Add non-nullid parents to list of nodes to tag.
840 # Add non-nullid parents to list of nodes to tag.
841 nodestotag.update([p for p in self.parents(n) if
841 nodestotag.update([p for p in self.parents(n) if
842 p != nullid])
842 p != nullid])
843 elif n in heads: # We've seen it before, is it a fake head?
843 elif n in heads: # We've seen it before, is it a fake head?
844 # So it is, real heads should not be the ancestors of
844 # So it is, real heads should not be the ancestors of
845 # any other heads.
845 # any other heads.
846 heads.pop(n)
846 heads.pop(n)
847 if not ancestors:
847 if not ancestors:
848 return nonodes
848 return nonodes
849 # Now that we have our set of ancestors, we want to remove any
849 # Now that we have our set of ancestors, we want to remove any
850 # roots that are not ancestors.
850 # roots that are not ancestors.
851
851
852 # If one of the roots was nullid, everything is included anyway.
852 # If one of the roots was nullid, everything is included anyway.
853 if lowestrev > nullrev:
853 if lowestrev > nullrev:
854 # But, since we weren't, let's recompute the lowest rev to not
854 # But, since we weren't, let's recompute the lowest rev to not
855 # include roots that aren't ancestors.
855 # include roots that aren't ancestors.
856
856
857 # Filter out roots that aren't ancestors of heads
857 # Filter out roots that aren't ancestors of heads
858 roots = [root for root in roots if root in ancestors]
858 roots = [root for root in roots if root in ancestors]
859 # Recompute the lowest revision
859 # Recompute the lowest revision
860 if roots:
860 if roots:
861 lowestrev = min([self.rev(root) for root in roots])
861 lowestrev = min([self.rev(root) for root in roots])
862 else:
862 else:
863 # No more roots? Return empty list
863 # No more roots? Return empty list
864 return nonodes
864 return nonodes
865 else:
865 else:
866 # We are descending from nullid, and don't need to care about
866 # We are descending from nullid, and don't need to care about
867 # any other roots.
867 # any other roots.
868 lowestrev = nullrev
868 lowestrev = nullrev
869 roots = [nullid]
869 roots = [nullid]
870 # Transform our roots list into a set.
870 # Transform our roots list into a set.
871 descendants = set(roots)
871 descendants = set(roots)
872 # Also, keep the original roots so we can filter out roots that aren't
872 # Also, keep the original roots so we can filter out roots that aren't
873 # 'real' roots (i.e. are descended from other roots).
873 # 'real' roots (i.e. are descended from other roots).
874 roots = descendants.copy()
874 roots = descendants.copy()
875 # Our topologically sorted list of output nodes.
875 # Our topologically sorted list of output nodes.
876 orderedout = []
876 orderedout = []
877 # Don't start at nullid since we don't want nullid in our output list,
877 # Don't start at nullid since we don't want nullid in our output list,
878 # and if nullid shows up in descendants, empty parents will look like
878 # and if nullid shows up in descendants, empty parents will look like
879 # they're descendants.
879 # they're descendants.
880 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
880 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
881 n = self.node(r)
881 n = self.node(r)
882 isdescendant = False
882 isdescendant = False
883 if lowestrev == nullrev: # Everybody is a descendant of nullid
883 if lowestrev == nullrev: # Everybody is a descendant of nullid
884 isdescendant = True
884 isdescendant = True
885 elif n in descendants:
885 elif n in descendants:
886 # n is already a descendant
886 # n is already a descendant
887 isdescendant = True
887 isdescendant = True
888 # This check only needs to be done here because all the roots
888 # This check only needs to be done here because all the roots
889 # will start being marked is descendants before the loop.
889 # will start being marked is descendants before the loop.
890 if n in roots:
890 if n in roots:
891 # If n was a root, check if it's a 'real' root.
891 # If n was a root, check if it's a 'real' root.
892 p = tuple(self.parents(n))
892 p = tuple(self.parents(n))
893 # If any of its parents are descendants, it's not a root.
893 # If any of its parents are descendants, it's not a root.
894 if (p[0] in descendants) or (p[1] in descendants):
894 if (p[0] in descendants) or (p[1] in descendants):
895 roots.remove(n)
895 roots.remove(n)
896 else:
896 else:
897 p = tuple(self.parents(n))
897 p = tuple(self.parents(n))
                # A node is a descendant if either of its parents is a
                # descendant. (We seeded the descendants set with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

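    # A small worked example (hypothetical nodes): on a linear graph
    # 0 <- 1 <- 2, nodesbetween(roots=[node(0)], heads=[node(2)]) returns
    # ([node(0), node(1), node(2)], [node(0)], [node(2)]): the topologically
    # ordered path, the roots that survived filtering, and the heads that
    # were actually reached from those roots.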
    def headrevs(self):
        try:
            return self.index.headrevs()
        except AttributeError:
            return self._headrevs()

    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iterate over filtered revs, so nobody is a head at start
        ishead = [0] * (count + 1)
        index = self.index
        for r in self:
            ishead[r] = 1 # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
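        # ishead is deliberately one element longer than the revlog: a root's
        # parents are nullrev (-1), so the write above can hit ishead[-1];
        # the extra slot absorbs that write instead of clobbering the entry
        # for the highest revision.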
        return [r for r, val in enumerate(ishead) if val]

    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = {startrev}
        heads = {startrev}

        parentrevs = self.parentrevs
        for r in self.revs(start=startrev + 1):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                        heads.add(r)
                    if p in heads and p not in stoprevs:
                        heads.remove(p)

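        # One forward pass suffices because parents always have smaller
        # revision numbers than their children: by the time r is visited,
        # every ancestor of r has already been classified as reachable or not.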
        return [self.node(r) for r in heads]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def descendant(self, start, end):
        if start == nullrev:
            return True
        for i in self.descendants([start]):
            if i == end:
                return True
            elif i > end:
                break
        return False

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.commonancestorsheads(a, b)
        except (AttributeError, OverflowError): # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, a, b)
        return pycompat.maplist(self.node, ancs)

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        The implementation of this is trivial but the use of
        commonancestorsheads is not."""
        return a in self.commonancestorsheads(a, b)
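    # Why the membership test above works: if a is an ancestor of b, the
    # common ancestors of a and b are exactly the ancestors of a (a included),
    # and a is the sole head of that set, so a must appear in the result of
    # commonancestorsheads.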

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return nullid

    def _match(self, id):
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass
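    # _match resolution order: an integer revision, a 20-byte binary node, a
    # decimal string revision (negative values count back from the tip), and
    # finally a full 40-digit hex nodeid; the function falls off the end and
    # returns None when nothing fits.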

    def _partialmatch(self, id):
        maybewdir = wdirhex.startswith(id)
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    raise RevlogError
                return partial
            if maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            return None
        except RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise LookupError(id, self.indexfile,
                                  _('ambiguous identifier'))
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        if id in self._pcache:
            return self._pcache[id]

        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                prefix = bin(id[:l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [n for n in nl if hex(n).startswith(id) and
                      self.hasnode(n)]
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except (TypeError, binascii.Error):
                pass
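    # The fallback above scans every index entry (e[7] is the binary node),
    # so prefix lookups that miss the radix tree are O(len(revlog)); the
    # parsers.c radix tree is what keeps the common unfiltered case fast.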

    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise LookupError(id, self.indexfile, _('no match found'))

    def shortest(self, hexnode, minlength=1):
        """Find the shortest unambiguous prefix that matches hexnode."""
        def isvalid(test):
            try:
                if self._partialmatch(test) is None:
                    return False

                try:
                    i = int(test)
                    # if we are a pure int, then starting with zero will not be
                    # confused as a rev; or, obviously, if the int is larger
                    # than the value of the tip rev
                    if test[0] == '0' or i > len(self):
                        return True
                    return False
                except ValueError:
                    return True
            except error.RevlogError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True

        shortest = hexnode
        startlength = max(6, minlength)
        length = startlength
        while True:
            test = hexnode[:length]
            if isvalid(test):
                shortest = test
                if length == minlength or length > startlength:
                    return shortest
                length -= 1
            else:
                length += 1
                if len(shortest) <= length:
                    return shortest

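    # Probing strategy: start at max(6, minlength) digits, walk down while
    # the prefix stays valid to find the true minimum, and walk up when the
    # starting prefix was already ambiguous. For instance, supposing a
    # hypothetical node 448725a2... that is unique down to three hex digits
    # while '44' is ambiguous, the loop tests lengths 6, 5, 4 and 3 (all
    # valid), then 2 ('44', ambiguous), and returns '448'.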
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

    def _cachesegment(self, offset, data):
        """Add a segment to the revlog cache.

        Accepts an absolute offset and the data that is at that location.
        """
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.
        """
        if df is not None:
            closehandle = False
        else:
            if self._inline:
                df = self.opener(self.indexfile)
            else:
                df = self.opener(self.datafile)
            closehandle = True

        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        realoffset = offset & ~(cachesize - 1)
        reallength = (((offset + length + cachesize) & ~(cachesize - 1))
                      - realoffset)
        df.seek(realoffset)
        d = df.read(reallength)
        if closehandle:
            df.close()
        self._cachesegment(realoffset, d)
        if offset != realoffset or reallength != length:
            return util.buffer(d, offset - realoffset, length)
        return d
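    # For example, with a chunk cache size of 65536 bytes, a request for 100
    # bytes at offset 70000 is widened to the aligned window [65536, 131072):
    # realoffset = 70000 & ~65535 = 65536, reallength =
    # ((70000 + 100 + 65536) & ~65535) - 65536 = 65536, and the caller gets a
    # 100-byte buffer view into the cached window.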

    def _getsegment(self, offset, length, df=None):
        """Obtain a segment of raw data from the revlog.

        Accepts an absolute offset, length of bytes to obtain, and an
        optional file handle to the already-opened revlog. If the file
        handle is used, its original seek position will not be preserved.

        Requests for data may be returned from a cache.

        Returns a str or a buffer instance of raw byte data.
        """
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return util.buffer(d, cachestart, cacheend - cachestart)

        return self._readsegment(offset, length, df=df)

    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start

        return start, self._getsegment(start, length, df=df)
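    # In an inline revlog the index entries and revision data are interleaved
    # in a single file, so the (rev + 1) * self._io.size terms above skip the
    # index entries that sit in front of each revision's data.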

    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])

    def _chunks(self, revs, df=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self._io.size
        buffer = util.buffer

        l = []
        ladd = l.append

        try:
            offset, data = self._getsegmentforrevs(revs[0], revs[-1], df=df)
        except OverflowError:
            # issue4215 - we can't cache a run of chunks greater than
            # 2G on Windows
            return [self._chunk(rev, df=df) for rev in revs]

        decomp = self.decompress
        for rev in revs:
            chunkstart = start(rev)
            if inline:
                chunkstart += (rev + 1) * iosize
            chunklength = length(rev)
            ladd(decomp(buffer(data, chunkstart - offset, chunklength)))

        return l
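    # Fetching revs[0]..revs[-1] as one contiguous segment trades some extra
    # bytes read (revisions between the requested ones come along) for a
    # single read and one cache fill, which is why _chunks() beats repeated
    # _chunk() calls on ascending runs of revisions.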

    def _chunkclear(self):
        """Clear the raw chunk cache."""
        self._chunkcache = (0, '')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1
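    # With generaldelta the index records each revision's actual delta base,
    # so bases can be parents anywhere in the file; without it the base is
    # implicitly the textually previous revision (rev - 1).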

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions

        The delta calculated is in binary form and is intended to be written to
        revlog data directly. So this function needs raw revision data.
        """
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return bytes(self._chunk(rev2))

        return mdiff.textdiff(self.revision(rev1, raw=True),
                              self.revision(rev2, raw=True))

    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
        raw - an optional argument specifying if the revision data is to be
        treated as raw data when applying flag transforms. 'raw' should be set
        to True when generating changegroups or in debug commands.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        cachedrev = None
        flags = None
        rawtext = None
        if node == nullid:
            return ""
        if self._cache:
            if self._cache[0] == node:
                # _cache only stores rawtext
                if raw:
                    return self._cache[2]
                # duplicated, but good for perf
                if rev is None:
                    rev = self.rev(node)
                if flags is None:
                    flags = self.flags(rev)
                # no extra flags set, no flag processor runs, text = rawtext
                if flags == REVIDX_DEFAULT_FLAGS:
                    return self._cache[2]
                # rawtext is reusable. need to run flag processor
                rawtext = self._cache[2]

            cachedrev = self._cache[1]

        # look up what we need to read
        if rawtext is None:
            if rev is None:
                rev = self.rev(node)

            chain, stopped = self._deltachain(rev, stoprev=cachedrev)
            if stopped:
                rawtext = self._cache[2]

            # drop cache to save memory
            self._cache = None

            bins = self._chunks(chain, df=_df)
            if rawtext is None:
                rawtext = bytes(bins[0])
                bins = bins[1:]

            rawtext = mdiff.patches(rawtext, bins)
            self._cache = (node, rev, rawtext)

        if flags is None:
            if rev is None:
                rev = self.rev(node)
            flags = self.flags(rev)

        text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)

        return text
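    # Reconstruction pipeline in revision(): resolve the delta chain for the
    # target rev, fetch and decompress all needed chunks in one pass, fold
    # the deltas together with mdiff.patches, then run read-side flag
    # processors and, when required, verify the text against the node hash.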

    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        return hash(text, p1, p2)

    def _processflags(self, text, flags, operation, raw=False):
        """Inspect revision data flags and apply transforms defined by
        registered flag processors.

        ``text`` - the revision data to process
        ``flags`` - the revision flags
        ``operation`` - the operation being performed (read or write)
        ``raw`` - an optional argument describing if the raw transform should be
        applied.

        This method processes the flags in the order (or reverse order if
        ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
        flag processors registered for present flags. The order of flags defined
        in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.

        Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
        processed text and ``validatehash`` is a bool indicating whether the
        returned text should be checked for hash integrity.

        Note: If the ``raw`` argument is set, it has precedence over the
        operation and will only update the value of ``validatehash``.
        """
        # fast path: no flag processors will run
        if flags == 0:
            return text, True
        if operation not in ('read', 'write'):
            raise ProgrammingError(_("invalid '%s' operation ") % (operation))
        # Check all flags are known.
        if flags & ~REVIDX_KNOWN_FLAGS:
            raise RevlogError(_("incompatible revision flag '%#x'") %
                              (flags & ~REVIDX_KNOWN_FLAGS))
        validatehash = True
        # Depending on the operation (read or write), the order might be
        # reversed due to non-commutative transforms.
        orderedflags = REVIDX_FLAGS_ORDER
        if operation == 'write':
            orderedflags = reversed(orderedflags)

        for flag in orderedflags:
            # If a flagprocessor has been registered for a known flag, apply the
            # related operation transform and update result tuple.
            if flag & flags:
                vhash = True

                if flag not in _flagprocessors:
                    message = _("missing processor for flag '%#x'") % (flag)
                    raise RevlogError(message)

                processor = _flagprocessors[flag]
                if processor is not None:
                    readtransform, writetransform, rawtransform = processor

                    if raw:
                        vhash = rawtransform(self, text)
                    elif operation == 'read':
                        text, vhash = readtransform(self, text)
                    else: # write operation
                        text, vhash = writetransform(self, text)
                    validatehash = validatehash and vhash

        return text, validatehash
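    # Running write-side processors in reverse order makes the two sides
    # mirror each other: if a read applies transforms A then B, the matching
    # write applies B then A, so non-commutative transforms still round-trip.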

    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.
        """
        if p1 is None and p2 is None:
            p1, p2 = self.parents(node)
        if node != self.hash(text, p1, p2):
            revornode = rev
            if revornode is None:
                revornode = templatefilters.short(hex(node))
            raise RevlogError(_("integrity check failed on %s:%s")
                              % (self.indexfile, pycompat.bytestr(revornode)))

    def checkinlinesize(self, tr, fp=None):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        if trindex is not None:
            dataoff = self.start(trindex)
        else:
            # revlog was stripped at start of transaction, use all leftover data
            trindex = len(self) - 1
            dataoff = self.end(-2)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()

        df = self.opener(self.datafile, 'w')
        try:
            for r in self:
                df.write(self._getsegmentforrevs(r, r)[1])
        finally:
            df.close()

        fp = self.opener(self.indexfile, 'w', atomictemp=True,
                         checkambig=self._checkambig)
        self.version &= ~FLAG_INLINE_DATA
        self._inline = False
        for i in self:
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)

        # if we don't call close, the temp file will never replace the
        # real index
        fp.close()

        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()

    def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
                    node=None, flags=REVIDX_DEFAULT_FLAGS):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
            computed by default as hash(text, p1, p2), however subclasses might
            use different hashing method (and override checkhash() in such case)
        flags - the known flags to set on the revision
        """
        if link == nullrev:
            raise RevlogError(_("attempted to add linkrev -1 to %s")
                              % self.indexfile)

        if flags:
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = self._processflags(text, flags, 'write')

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise RevlogError(
                _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
                % (self.indexfile, len(rawtext)))

        node = node or self.hash(rawtext, p1, p2)
        if node in self.nodemap:
            return node

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
                                   flags, cachedelta=cachedelta)

    def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
                       cachedelta=None):
        """add a raw revision with known flags, node and parents
        useful when reusing a revision not stored in this revlog (ex: received
        over wire, or read from an external bundle).
        """
        dfh = None
        if not self._inline:
            dfh = self.opener(self.datafile, "a+")
        ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
        try:
            return self._addrevision(node, rawtext, transaction, link, p1, p2,
                                     flags, cachedelta, ifh, dfh)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def compress(self, data):
        """Generate a possibly-compressed representation of data."""
        if not data:
            return '', data

        compressed = self._compressor.compress(data)

        if compressed:
            # The revlog compressor added the header in the returned data.
            return '', compressed

        if data[0:1] == '\0':
            return '', data
        return 'u', data
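    # The (header, data) convention: an empty header means the compression
    # engine embedded its own header inside data; 'u' marks literal
    # uncompressed data; and chunks already starting with '\0' are stored
    # as-is, since decompress() treats a leading '\0' as raw data.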

    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.
        """
        if not data:
            return data

        # Revlogs are read much more frequently than they are written and many
        # chunks only take microseconds to decompress, so performance is
        # important here.
        #
        # We can make a few assumptions about revlogs:
        #
        # 1) the majority of chunks will be compressed (as opposed to inline
        #    raw data).
        # 2) decompressing *any* data will likely be at least 10x slower than
        #    returning raw inline data.
        # 3) we want to prioritize common and officially supported compression
        #    engines
        #
        # It follows that we want to optimize for "decompress compressed data
        # when encoded with common and officially supported compression engines"
        # case over "raw data" and "data encoded by less common or non-official
        # compression engines." That is why we have the inline lookup first
        # followed by the compengines lookup.
        #
        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
        # compressed chunks. And this matters for changelog and manifest reads.
        t = data[0:1]

        if t == 'x':
            try:
                return _zlibdecompress(data)
            except zlib.error as e:
                raise RevlogError(_('revlog decompress error: %s') % str(e))
        # '\0' is more common than 'u' so it goes first.
        elif t == '\0':
            return data
        elif t == 'u':
            return util.buffer(data, 1)

        try:
            compressor = self._decompressors[t]
        except KeyError:
            try:
                engine = util.compengines.forrevlogheader(t)
                compressor = engine.revlogcompressor()
                self._decompressors[t] = compressor
            except KeyError:
                raise RevlogError(_('unknown compression type %r') % t)

        return compressor.decompress(data)

    def _isgooddelta(self, d, textlen):
        """Returns True if the given delta is good. Good means that it is within
        the disk span, disk size, and chain length bounds that we know to be
        performant."""
        if d is None:
            return False

        # - 'dist' is the distance from the base revision -- bounding it limits
        #   the amount of I/O we need to do.
        # - 'compresseddeltalen' is the sum of the total size of deltas we need
        #   to apply -- bounding it limits the amount of CPU we consume.
        dist, l, data, base, chainbase, chainlen, compresseddeltalen = d

        defaultmax = textlen * 4
        maxdist = self._maxdeltachainspan
        if not maxdist:
            maxdist = dist # ensure the conditional passes
        maxdist = max(maxdist, defaultmax)
        if (dist > maxdist or l > textlen or
            compresseddeltalen > textlen * 2 or
            (self._maxchainlen and chainlen > self._maxchainlen)):
            return False

        return True
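    # For example, with textlen = 1000 a candidate is rejected when the delta
    # itself exceeds 1000 bytes, when the compressed deltas on the chain add
    # up to more than 2000 bytes, when the chain is longer than a configured
    # _maxchainlen, or when the chain's span on disk exceeds the effective
    # maximum (at least textlen * 4 = 4000 bytes).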

    def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
                     cachedelta, ifh, dfh, alwayscache=False):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.

        note: "addrevision" takes non-raw text, "_addrevision" takes raw text.

        invariants:
        - rawtext is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        if node == nullid:
            raise RevlogError(_("%s: attempt to add null revision") %
                              (self.indexfile))
        if node == wdirid:
            raise RevlogError(_("%s: attempt to add wdir revision") %
                              (self.indexfile))

        btext = [rawtext]
        def buildtext():
            if btext[0] is not None:
                return btext[0]
            baserev = cachedelta[0]
            delta = cachedelta[1]
            # special case deltas which replace entire base; no need to decode
            # base revision. this neatly avoids censored bases, which throw when
            # they're decoded.
            hlen = struct.calcsize(">lll")
            if delta[:hlen] == mdiff.replacediffheader(self.rawsize(baserev),
                                                       len(delta) - hlen):
                btext[0] = delta[hlen:]
            else:
                if self._inline:
                    fh = ifh
                else:
                    fh = dfh
                basetext = self.revision(baserev, _df=fh, raw=True)
                btext[0] = mdiff.patch(basetext, delta)

            try:
                res = self._processflags(btext[0], flags, 'read', raw=True)
                btext[0], validatehash = res
                if validatehash:
                    self.checkhash(btext[0], node, p1=p1, p2=p2)
                if flags & REVIDX_ISCENSORED:
                    raise RevlogError(_('node %s is not censored') % node)
            except CensoredNodeError:
                # must pass the censored index flag to add censored revisions
                if not flags & REVIDX_ISCENSORED:
                    raise
            return btext[0]
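        # btext is a one-element list rather than a plain variable so that
        # buildtext() can memoize its result through the closure: this file
        # still runs on Python 2, which lacks 'nonlocal', and mutating the
        # list is the usual workaround.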

        def builddelta(rev):
            # can we use the cached delta?
            if cachedelta and cachedelta[0] == rev:
                delta = cachedelta[1]
            else:
                t = buildtext()
                if self.iscensored(rev):
                    # deltas based on a censored revision must replace the
                    # full content in one patch, so delta works everywhere
                    header = mdiff.replacediffheader(self.rawsize(rev), len(t))
                    delta = header + t
                else:
                    if self._inline:
                        fh = ifh
                    else:
                        fh = dfh
                    ptext = self.revision(rev, _df=fh, raw=True)
                    delta = mdiff.textdiff(ptext, t)
            header, data = self.compress(delta)
            deltalen = len(header) + len(data)
            chainbase = self.chainbase(rev)
            dist = deltalen + offset - self.start(chainbase)
            if self._generaldelta:
                base = rev
            else:
                base = chainbase
            chainlen, compresseddeltalen = self._chaininfo(rev)
            chainlen += 1
            compresseddeltalen += deltalen
            return (dist, deltalen, (header, data), base,
                    chainbase, chainlen, compresseddeltalen)
1769
1807
1770 curr = len(self)
1808 curr = len(self)
1771 prev = curr - 1
1809 prev = curr - 1
1772 offset = self.end(prev)
1810 offset = self.end(prev)
1773 delta = None
1811 delta = None
1774 p1r, p2r = self.rev(p1), self.rev(p2)
1812 p1r, p2r = self.rev(p1), self.rev(p2)
1775
1813
1776 # full versions are inserted when the needed deltas
1814 # full versions are inserted when the needed deltas
1777 # become comparable to the uncompressed text
1815 # become comparable to the uncompressed text
1778 if rawtext is None:
1816 if rawtext is None:
1779 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1817 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1780 cachedelta[1])
1818 cachedelta[1])
1781 else:
1819 else:
1782 textlen = len(rawtext)
1820 textlen = len(rawtext)
1783
1821
1784 # should we try to build a delta?
1822 # should we try to build a delta?
1785 if prev != nullrev and self.storedeltachains:
1823 if prev != nullrev and self.storedeltachains:
1786 tested = set()
1824 tested = set()
1787 # This condition is true most of the time when processing
1825 # This condition is true most of the time when processing
1788 # changegroup data into a generaldelta repo. The only time it
1826 # changegroup data into a generaldelta repo. The only time it
1789 # isn't true is if this is the first revision in a delta chain
1827 # isn't true is if this is the first revision in a delta chain
1790 # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
1828 # or if ``format.generaldelta=true`` disabled ``lazydeltabase``.
1791 if cachedelta and self._generaldelta and self._lazydeltabase:
1829 if cachedelta and self._generaldelta and self._lazydeltabase:
1792 # Assume what we received from the server is a good choice
1830 # Assume what we received from the server is a good choice
1793 # build delta will reuse the cache
1831 # build delta will reuse the cache
1794 candidatedelta = builddelta(cachedelta[0])
1832 candidatedelta = builddelta(cachedelta[0])
1795 tested.add(cachedelta[0])
1833 tested.add(cachedelta[0])
1796 if self._isgooddelta(candidatedelta, textlen):
1834 if self._isgooddelta(candidatedelta, textlen):
1797 delta = candidatedelta
1835 delta = candidatedelta
1798 if delta is None and self._generaldelta:
1836 if delta is None and self._generaldelta:
1799 # exclude already lazy tested base if any
1837 # exclude already lazy tested base if any
1800 parents = [p for p in (p1r, p2r)
1838 parents = [p for p in (p1r, p2r)
1801 if p != nullrev and p not in tested]
1839 if p != nullrev and p not in tested]
1802 if parents and not self._aggressivemergedeltas:
1840 if parents and not self._aggressivemergedeltas:
1803 # Pick whichever parent is closer to us (to minimize the
1841 # Pick whichever parent is closer to us (to minimize the
1804 # chance of having to build a fulltext).
1842 # chance of having to build a fulltext).
1805 parents = [max(parents)]
1843 parents = [max(parents)]
1806 tested.update(parents)
1844 tested.update(parents)
1807 pdeltas = []
1845 pdeltas = []
1808 for p in parents:
1846 for p in parents:
1809 pd = builddelta(p)
1847 pd = builddelta(p)
1810 if self._isgooddelta(pd, textlen):
1848 if self._isgooddelta(pd, textlen):
1811 pdeltas.append(pd)
1849 pdeltas.append(pd)
1812 if pdeltas:
1850 if pdeltas:
1813 delta = min(pdeltas, key=lambda x: x[1])
1851 delta = min(pdeltas, key=lambda x: x[1])
1814 if delta is None and prev not in tested:
1852 if delta is None and prev not in tested:
1815 # other approach failed try against prev to hopefully save us a
1853 # other approach failed try against prev to hopefully save us a
1816 # fulltext.
1854 # fulltext.
1817 candidatedelta = builddelta(prev)
1855 candidatedelta = builddelta(prev)
1818 if self._isgooddelta(candidatedelta, textlen):
1856 if self._isgooddelta(candidatedelta, textlen):
1819 delta = candidatedelta
1857 delta = candidatedelta
1820 if delta is not None:
1858 if delta is not None:
1821 dist, l, data, base, chainbase, chainlen, compresseddeltalen = delta
1859 dist, l, data, base, chainbase, chainlen, compresseddeltalen = delta
1822 else:
1860 else:
1823 rawtext = buildtext()
1861 rawtext = buildtext()
1824 data = self.compress(rawtext)
1862 data = self.compress(rawtext)
1825 l = len(data[1]) + len(data[0])
1863 l = len(data[1]) + len(data[0])
1826 base = chainbase = curr
1864 base = chainbase = curr
1827
1865
1828 e = (offset_type(offset, flags), l, textlen,
1866 e = (offset_type(offset, flags), l, textlen,
1829 base, link, p1r, p2r, node)
1867 base, link, p1r, p2r, node)
1830 self.index.insert(-1, e)
1868 self.index.insert(-1, e)
1831 self.nodemap[node] = curr
1869 self.nodemap[node] = curr
1832
1870
1833 entry = self._io.packentry(e, self.node, self.version, curr)
1871 entry = self._io.packentry(e, self.node, self.version, curr)
1834 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
1872 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
1835
1873
1836 if alwayscache and rawtext is None:
1874 if alwayscache and rawtext is None:
1837 rawtext = buildtext()
1875 rawtext = buildtext()
1838
1876
1839 if type(rawtext) == str: # only accept immutable objects
1877 if type(rawtext) == str: # only accept immutable objects
1840 self._cache = (node, curr, rawtext)
1878 self._cache = (node, curr, rawtext)
1841 self._chainbasecache[curr] = chainbase
1879 self._chainbasecache[curr] = chainbase
1842 return node
1880 return node
1843
1881
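    # Editor's sketch (not part of revlog.py): the candidate order that
    # _addrevision() above works through before giving up and storing a
    # full text. Names are as in the method; the first candidate that
    # _isgooddelta() accepts wins:
    #
    #   candidates = []
    #   if cachedelta and self._generaldelta and self._lazydeltabase:
    #       candidates.append(cachedelta[0])   # base suggested by the peer
    #   if self._generaldelta:
    #       candidates.extend(parents)         # one or both parents
    #   candidates.append(prev)                # the previous revision
    #   # no good candidate -> compress buildtext() as a full snapshot
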
    def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3.
        ifh.seek(0, os.SEEK_END)
        if dfh:
            dfh.seek(0, os.SEEK_END)

        curr = len(self) - 1
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

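    # Editor's illustration of the a+ workaround above (hedged; ``path``
    # and ``data`` are hypothetical). The pattern is simply to reposition
    # to EOF before every write, because a preceding read may have moved
    # the file pointer and some platforms then fail to append:
    #
    #   fh = open(path, 'ab+')
    #   fh.read()                 # a read moves the position...
    #   fh.seek(0, os.SEEK_END)   # ...so seek back to EOF
    #   fh.write(data)            # now the write really appends
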
    def addgroup(self, deltas, transaction, addrevisioncb=None):
        """
        add a delta group

        Given a set of deltas, add them to the revision log. The first delta
        is against its parent, which should be in our log; the rest are
        against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.
        """

        nodes = []

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self.opener(self.indexfile, "a+", checkambig=self._checkambig)
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a+")
        def flush():
            if dfh:
                dfh.flush()
            ifh.flush()
        try:
            # loop through our set of deltas
            for data in deltas:
                node, p1, p2, link, deltabase, delta, flags = data
                flags = flags or REVIDX_DEFAULT_FLAGS

                nodes.append(node)

                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    continue

                for p in (p1, p2):
                    if p not in self.nodemap:
                        raise LookupError(p, self.indexfile,
                                          _('unknown parent'))

                if deltabase not in self.nodemap:
                    raise LookupError(deltabase, self.indexfile,
                                      _('unknown delta base'))

                baserev = self.rev(deltabase)

                if baserev != nullrev and self.iscensored(baserev):
                    # if base is censored, delta must be full replacement in a
                    # single patch operation
                    hlen = struct.calcsize(">lll")
                    oldlen = self.rawsize(baserev)
                    newlen = len(delta) - hlen
                    if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
                        raise error.CensoredBaseError(self.indexfile,
                                                      self.node(baserev))

                if not flags and self._peek_iscensored(baserev, delta, flush):
                    flags |= REVIDX_ISCENSORED

                # We assume consumers of addrevisioncb will want to retrieve
                # the added revision, which will require a call to
                # revision(). revision() will fast path if there is a cache
                # hit. So, we tell _addrevision() to always cache in this case.
                # We're only using addgroup() in the context of changegroup
                # generation so the revision data can always be handled as raw
                # by the flagprocessor.
                self._addrevision(node, None, transaction, link,
                                  p1, p2, flags, (baserev, delta),
                                  ifh, dfh,
                                  alwayscache=bool(addrevisioncb))

                if addrevisioncb:
                    addrevisioncb(self, node)

                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self.opener(self.datafile, "a+")
                    ifh = self.opener(self.indexfile, "a+",
                                      checkambig=self._checkambig)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return nodes

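    # Editor's note on the shape addgroup() consumes (a sketch, not an API
    # contract): each item of ``deltas`` unpacks as the 7-tuple
    #
    #   (node, p1, p2, link, deltabase, delta, flags)
    #
    # where ``delta`` is a binary patch against ``deltabase``, which must
    # already be known to this revlog (or have been added earlier in the
    # same group).
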
    def iscensored(self, rev):
        """Check if a file revision is censored."""
        return False

    def _peek_iscensored(self, baserev, delta, flush):
        """Quickly check if a delta produces a censored revision."""
        return False

    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev

        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        brokenrevs = set()
        strippoint = len(self)

        heads = {}
        futurelargelinkrevs = set()
        for head in self.headrevs():
            headlinkrev = self.linkrev(head)
            heads[head] = headlinkrev
            if headlinkrev >= minlink:
                futurelargelinkrevs.add(headlinkrev)

        # This algorithm involves walking down the rev graph, starting at the
        # heads. Since the revs are topologically sorted according to linkrev,
        # once all head linkrevs are below the minlink, we know there are
        # no more revs that could have a linkrev greater than minlink.
        # So we can stop walking.
        while futurelargelinkrevs:
            strippoint -= 1
            linkrev = heads.pop(strippoint)

            if linkrev < minlink:
                brokenrevs.add(strippoint)
            else:
                futurelargelinkrevs.remove(linkrev)

            for p in self.parentrevs(strippoint):
                if p != nullrev:
                    plinkrev = self.linkrev(p)
                    heads[p] = plinkrev
                    if plinkrev >= minlink:
                        futurelargelinkrevs.add(plinkrev)

        return strippoint, brokenrevs

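    # Editor's worked example of the walk above (hedged, toy numbers):
    # with a linear history 0 <- 1 <- 2 <- 3, linkrevs [0, 1, 2, 3] and
    # minlink=2, the walk starts at head 3, pops revs 3 and 2 (both
    # linkrevs >= minlink), then stops because rev 1's linkrev (1) is
    # below minlink -- returning strippoint=2 and an empty brokenrevs set.
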
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chaininfocache = {}
        self._chunkclear()
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]

    def checksize(self):
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            dd = actual - expected
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res

    DELTAREUSEALWAYS = 'always'
    DELTAREUSESAMEREVS = 'samerevs'
    DELTAREUSENEVER = 'never'

    DELTAREUSEALL = {'always', 'samerevs', 'never'}

    def clone(self, tr, destrevlog, addrevisioncb=None,
              deltareuse=DELTAREUSESAMEREVS, aggressivemergedeltas=None):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can have
        the following values:

        DELTAREUSEALWAYS
          Deltas will always be reused (if possible), even if the destination
          revlog would not select the same revisions for the delta. This is
          the fastest mode of operation.
        DELTAREUSESAMEREVS
          Deltas will be reused if the destination revlog would pick the same
          revisions for the delta. This mode strikes a balance between speed
          and optimization.
        DELTAREUSENEVER
          Deltas will never be reused. This is the slowest mode of execution.
          This mode can be used to recompute deltas (e.g. if the diff/delta
          algorithm changes).

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``aggressivemergedeltas`` argument
        controls whether to compute deltas against both parents for merges.
        If unspecified, the destination revlog's existing setting is kept.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)

        if len(destrevlog):
            raise ValueError(_('destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_('source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_('destination revlog has filtered revisions'))

        # lazydeltabase controls whether to reuse a cached delta, if possible.
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._aggressivemergedeltas

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False

            destrevlog._aggressivemergedeltas = aggressivemergedeltas or oldamd

            populatecachedelta = deltareuse in (self.DELTAREUSEALWAYS,
                                                self.DELTAREUSESAMEREVS)

            index = self.index
            for rev in self:
                entry = index[rev]

                # Some classes override linkrev to take filtered revs into
                # account. Use raw entry from index.
                flags = entry[0] & 0xffff
                linkrev = entry[4]
                p1 = index[entry[5]][7]
                p2 = index[entry[6]][7]
                node = entry[7]

                # (Possibly) reuse the delta from the revlog if allowed and
                # the revlog chunk is a delta.
                cachedelta = None
                rawtext = None
                if populatecachedelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, str(self._chunk(rev)))

                if not cachedelta:
                    rawtext = self.revision(rev, raw=True)

                ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
                                        checkambig=False)
                dfh = None
                if not destrevlog._inline:
                    dfh = destrevlog.opener(destrevlog.datafile, 'a+')
                try:
                    destrevlog._addrevision(node, rawtext, tr, linkrev, p1, p2,
                                            flags, cachedelta, ifh, dfh)
                finally:
                    if dfh:
                        dfh.close()
                    ifh.close()

                if addrevisioncb:
                    addrevisioncb(self, rev, node)
        finally:
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._aggressivemergedeltas = oldamd
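
    # Hedged usage sketch (editor's addition; ``tr``, ``src`` and ``dest``
    # are hypothetical): recompute every delta while copying into an empty
    # revlog, e.g. after a delta algorithm change:
    #
    #   src.clone(tr, dest, deltareuse=revlog.DELTAREUSENEVER,
    #             aggressivemergedeltas=True)
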
@@ -1,1460 +1,1426 @@
# templater.py - template expansion for output
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import, print_function

import os
import re
import types

from .i18n import _
from . import (
    color,
    config,
    encoding,
    error,
    minirst,
    obsutil,
    parser,
    pycompat,
    registrar,
    revset as revsetmod,
    revsetlang,
    templatefilters,
    templatekw,
    util,
)

# template parsing

elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    "(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None),
    "%": (16, None, None, ("%", 16), None),
    "|": (15, None, None, ("|", 15), None),
    "*": (5, None, None, ("*", 5), None),
    "/": (5, None, None, ("/", 5), None),
    "+": (4, None, None, ("+", 4), None),
    "-": (4, None, ("negate", 19), ("-", 4), None),
    "=": (3, None, None, ("keyvalue", 3), None),
    ",": (2, None, None, ("list", 2), None),
    ")": (0, None, None, None, None),
    "integer": (0, "integer", None, None, None),
    "symbol": (0, "symbol", None, None, None),
    "string": (0, "string", None, None, None),
    "template": (0, "template", None, None, None),
    "end": (0, None, None, None, None),
}

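# Editor's note (an assumption about the table above, not from the source):
# the integers are Pratt-parser binding strengths, so a stronger infix
# operator binds tighter. Since "*" (5) outranks "+" (4), an expression
# like 1 + 2 * 3 parses as 1 + (2 * 3), and "|" (15) attaches a filter to
# the nearest operand before any arithmetic around it.
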
def tokenize(program, start, end, term=None):
    """Parse a template expression into a stream of tokens, which must end
    with term if specified"""
    pos = start
    program = pycompat.bytestr(program)
    while pos < end:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c in "(=,)%|+-*/": # handle simple operators
            yield (c, None, pos)
        elif c in '"\'': # handle quoted templates
            s = pos + 1
            data, pos = _parsetemplate(program, s, end, c)
            yield ('template', data, s)
            pos -= 1
        elif c == 'r' and program[pos:pos + 2] in ("r'", 'r"'):
            # handle quoted strings
            c = program[pos + 1]
            s = pos = pos + 2
            while pos < end: # find closing quote
                d = program[pos]
                if d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    yield ('string', program[s:pos], s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        elif c.isdigit():
            s = pos
            while pos < end:
                d = program[pos]
                if not d.isdigit():
                    break
                pos += 1
            yield ('integer', program[s:pos], s)
            pos -= 1
        elif (c == '\\' and program[pos:pos + 2] in (r"\'", r'\"')
              or c == 'r' and program[pos:pos + 3] in (r"r\'", r'r\"')):
            # handle escaped quoted strings for compatibility with 2.9.2-3.4,
            # where some of nested templates were preprocessed as strings and
            # then compiled. therefore, \"...\" was allowed. (issue4733)
            #
            # processing flow of _evalifliteral() at 5ab28a2e9962:
            # outer template string    -> stringify()  -> compiletemplate()
            # ------------------------    ------------    ------------------
            # {f("\\\\ {g(\"\\\"\")}"}    \\ {g("\"")}    [r'\\', {g("\"")}]
            #             ~~~~~~~~
            #             escaped quoted string
            if c == 'r':
                pos += 1
                token = 'string'
            else:
                token = 'template'
            quote = program[pos:pos + 2]
            s = pos = pos + 2
            while pos < end: # find closing escaped quote
                if program.startswith('\\\\\\', pos, end):
                    pos += 4 # skip over double escaped characters
                    continue
                if program.startswith(quote, pos, end):
                    # interpret as if it were a part of an outer string
                    data = parser.unescapestr(program[s:pos])
                    if token == 'template':
                        data = _parsetemplate(data, 0, len(data))[0]
                    yield (token, data, s)
                    pos += 1
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        elif c.isalnum() or c in '_':
            s = pos
            pos += 1
            while pos < end: # find end of symbol
                d = program[pos]
                if not (d.isalnum() or d == "_"):
                    break
                pos += 1
            sym = program[s:pos]
            yield ('symbol', sym, s)
            pos -= 1
        elif c == term:
            yield ('end', None, pos + 1)
            return
        else:
            raise error.ParseError(_("syntax error"), pos)
        pos += 1
    if term:
        raise error.ParseError(_("unterminated template expansion"), start)
    yield ('end', None, pos)

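# Editor's illustration of the resulting token stream (hedged; offsets
# assume the 9-byte program shown):
#
#   >>> list(tokenize(b'rev|short', 0, 9))
#   [('symbol', 'rev', 0), ('|', None, 3), ('symbol', 'short', 4),
#    ('end', None, 9)]
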
def _parsetemplate(tmpl, start, stop, quote=''):
    r"""
    >>> _parsetemplate(b'foo{bar}"baz', 0, 12)
    ([('string', 'foo'), ('symbol', 'bar'), ('string', '"baz')], 12)
    >>> _parsetemplate(b'foo{bar}"baz', 0, 12, quote=b'"')
    ([('string', 'foo'), ('symbol', 'bar')], 9)
    >>> _parsetemplate(b'foo"{bar}', 0, 9, quote=b'"')
    ([('string', 'foo')], 4)
    >>> _parsetemplate(br'foo\"bar"baz', 0, 12, quote=b'"')
    ([('string', 'foo"'), ('string', 'bar')], 9)
    >>> _parsetemplate(br'foo\\"bar', 0, 10, quote=b'"')
    ([('string', 'foo\\')], 6)
    """
    parsed = []
    sepchars = '{' + quote
    pos = start
    p = parser.parser(elements)
    while pos < stop:
        n = min((tmpl.find(c, pos, stop) for c in sepchars),
                key=lambda n: (n < 0, n))
        if n < 0:
            parsed.append(('string', parser.unescapestr(tmpl[pos:stop])))
            pos = stop
            break
        c = tmpl[n:n + 1]
        bs = (n - pos) - len(tmpl[pos:n].rstrip('\\'))
        if bs % 2 == 1:
            # escaped (e.g. '\{', '\\\{', but not '\\{')
            parsed.append(('string', parser.unescapestr(tmpl[pos:n - 1]) + c))
            pos = n + 1
            continue
        if n > pos:
            parsed.append(('string', parser.unescapestr(tmpl[pos:n])))
        if c == quote:
            return parsed, n + 1

        parseres, pos = p.parse(tokenize(tmpl, n + 1, stop, '}'))
        parsed.append(parseres)

    if quote:
        raise error.ParseError(_("unterminated string"), start)
    return parsed, pos

def _unnesttemplatelist(tree):
    """Expand list of templates to node tuple

    >>> def f(tree):
    ...     print(pycompat.sysstr(prettyformat(_unnesttemplatelist(tree))))
    >>> f((b'template', []))
    (string '')
    >>> f((b'template', [(b'string', b'foo')]))
    (string 'foo')
    >>> f((b'template', [(b'string', b'foo'), (b'symbol', b'rev')]))
    (template
      (string 'foo')
      (symbol 'rev'))
    >>> f((b'template', [(b'symbol', b'rev')])) # template(rev) -> str
    (template
      (symbol 'rev'))
    >>> f((b'template', [(b'template', [(b'string', b'foo')])]))
    (string 'foo')
    """
    if not isinstance(tree, tuple):
        return tree
    op = tree[0]
    if op != 'template':
        return (op,) + tuple(_unnesttemplatelist(x) for x in tree[1:])

    assert len(tree) == 2
    xs = tuple(_unnesttemplatelist(x) for x in tree[1])
    if not xs:
        return ('string', '') # empty template ""
    elif len(xs) == 1 and xs[0][0] == 'string':
        return xs[0] # fast path for string with no template fragment "x"
    else:
        return (op,) + xs

def parse(tmpl):
    """Parse template string into tree"""
    parsed, pos = _parsetemplate(tmpl, 0, len(tmpl))
    assert pos == len(tmpl), 'unquoted template should be consumed'
    return _unnesttemplatelist(('template', parsed))

def _parseexpr(expr):
    """Parse a template expression into tree

    >>> _parseexpr(b'"foo"')
    ('string', 'foo')
    >>> _parseexpr(b'foo(bar)')
    ('func', ('symbol', 'foo'), ('symbol', 'bar'))
    >>> _parseexpr(b'foo(')
    Traceback (most recent call last):
      ...
    ParseError: ('not a prefix: end', 4)
    >>> _parseexpr(b'"foo" "bar"')
    Traceback (most recent call last):
      ...
    ParseError: ('invalid token', 7)
    """
    p = parser.parser(elements)
    tree, pos = p.parse(tokenize(expr, 0, len(expr)))
    if pos != len(expr):
        raise error.ParseError(_('invalid token'), pos)
    return _unnesttemplatelist(tree)

def prettyformat(tree):
    return parser.prettyformat(tree, ('integer', 'string', 'symbol'))

def compileexp(exp, context, curmethods):
    """Compile parsed template tree to (func, data) pair"""
    t = exp[0]
    if t in curmethods:
        return curmethods[t](exp, context)
    raise error.ParseError(_("unknown method '%s'") % t)

# template evaluation

def getsymbol(exp):
    if exp[0] == 'symbol':
        return exp[1]
    raise error.ParseError(_("expected a symbol, got '%s'") % exp[0])

def getlist(x):
    if not x:
        return []
    if x[0] == 'list':
        return getlist(x[1]) + [x[2]]
    return [x]

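# Editor's illustration (hedged): getlist() flattens the left-leaning
# 'list' nodes that the "," operator builds, so
#
#   getlist(('list', ('list', ('symbol', 'a'), ('symbol', 'b')),
#            ('symbol', 'c')))
#
# returns [('symbol', 'a'), ('symbol', 'b'), ('symbol', 'c')].
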
def gettemplate(exp, context):
    """Compile given template tree or load named template from map file;
    returns (func, data) pair"""
    if exp[0] in ('template', 'string'):
        return compileexp(exp, context, methods)
    if exp[0] == 'symbol':
        # unlike runsymbol(), here 'symbol' is always taken as template name
        # even if it exists in mapping. this allows us to override mapping
        # by web templates, e.g. 'changelogtag' is redefined in map file.
        return context._load(exp[1])
    raise error.ParseError(_("expected template specifier"))

def findsymbolicname(arg):
    """Find symbolic name for the given compiled expression; returns None
    if nothing found reliably"""
    while True:
        func, data = arg
        if func is runsymbol:
            return data
        elif func is runfilter:
            arg = data[0]
        else:
            return None

def evalfuncarg(context, mapping, arg):
    func, data = arg
    # func() may return string, generator of strings or arbitrary object such
    # as date tuple, but filter does not want generator.
    thing = func(context, mapping, data)
    if isinstance(thing, types.GeneratorType):
        thing = stringify(thing)
    return thing

def evalboolean(context, mapping, arg):
    """Evaluate given argument as boolean, but also takes boolean literals"""
    func, data = arg
    if func is runsymbol:
        thing = func(context, mapping, data, default=None)
        if thing is None:
            # not a template keyword, so take it as a boolean literal
            thing = util.parsebool(data)
    else:
        thing = func(context, mapping, data)
    if isinstance(thing, bool):
        return thing
    # other objects are evaluated as strings, which means 0 is True, but
    # empty dict/list should be False as they are expected to be ''
    return bool(stringify(thing))

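# Editor's note (hedged reading of the comment above): because non-boolean
# results are stringified, a literal 0 stringifies to '0' and is therefore
# truthy -- e.g. {if(0, "t", "f")} would be expected to render "t" -- while
# an empty list stringifies to '' and is false.
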
def evalinteger(context, mapping, arg, err):
    v = evalfuncarg(context, mapping, arg)
    try:
        return int(v)
    except (TypeError, ValueError):
        raise error.ParseError(err)

def evalstring(context, mapping, arg):
    func, data = arg
    return stringify(func(context, mapping, data))

def evalstringliteral(context, mapping, arg):
    """Evaluate given argument as string template, but returns symbol name
    if it is unknown"""
    func, data = arg
    if func is runsymbol:
        thing = func(context, mapping, data, default=data)
    else:
        thing = func(context, mapping, data)
    return stringify(thing)

def runinteger(context, mapping, data):
    return int(data)

def runstring(context, mapping, data):
    return data

def _recursivesymbolblocker(key):
    def showrecursion(**args):
        raise error.Abort(_("recursive reference '%s' in template") % key)
    return showrecursion

def _runrecursivesymbol(context, mapping, key):
    raise error.Abort(_("recursive reference '%s' in template") % key)

def runsymbol(context, mapping, key, default=''):
    v = mapping.get(key)
    if v is None:
        v = context._defaults.get(key)
    if v is None:
        # put poison to cut recursion. we can't move this to parsing phase
        # because "x = {x}" is allowed if "x" is a keyword. (issue4758)
        safemapping = mapping.copy()
        safemapping[key] = _recursivesymbolblocker(key)
        try:
            v = context.process(key, safemapping)
        except TemplateNotFound:
            v = default
    if callable(v):
        return v(**pycompat.strkwargs(mapping))
    return v

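# Editor's illustration (hedged): the poisoning above is what turns a
# self-referencing map-file definition such as
#
#   x = "{x}"
#
# into "recursive reference 'x' in template" instead of unbounded
# recursion (issue4758).
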
378 def buildtemplate(exp, context):
378 def buildtemplate(exp, context):
379 ctmpl = [compileexp(e, context, methods) for e in exp[1:]]
379 ctmpl = [compileexp(e, context, methods) for e in exp[1:]]
380 return (runtemplate, ctmpl)
380 return (runtemplate, ctmpl)
381
381
382 def runtemplate(context, mapping, template):
382 def runtemplate(context, mapping, template):
383 for func, data in template:
383 for func, data in template:
384 yield func(context, mapping, data)
384 yield func(context, mapping, data)
385
385
386 def buildfilter(exp, context):
386 def buildfilter(exp, context):
387 n = getsymbol(exp[2])
387 n = getsymbol(exp[2])
388 if n in context._filters:
388 if n in context._filters:
389 filt = context._filters[n]
389 filt = context._filters[n]
390 arg = compileexp(exp[1], context, methods)
390 arg = compileexp(exp[1], context, methods)
391 return (runfilter, (arg, filt))
391 return (runfilter, (arg, filt))
392 if n in funcs:
392 if n in funcs:
393 f = funcs[n]
393 f = funcs[n]
394 args = _buildfuncargs(exp[1], context, methods, n, f._argspec)
394 args = _buildfuncargs(exp[1], context, methods, n, f._argspec)
395 return (f, args)
395 return (f, args)
396 raise error.ParseError(_("unknown function '%s'") % n)
396 raise error.ParseError(_("unknown function '%s'") % n)
397
397
398 def runfilter(context, mapping, data):
398 def runfilter(context, mapping, data):
399 arg, filt = data
399 arg, filt = data
400 thing = evalfuncarg(context, mapping, arg)
400 thing = evalfuncarg(context, mapping, arg)
401 try:
401 try:
402 return filt(thing)
402 return filt(thing)
403 except (ValueError, AttributeError, TypeError):
403 except (ValueError, AttributeError, TypeError):
404 sym = findsymbolicname(arg)
404 sym = findsymbolicname(arg)
405 if sym:
405 if sym:
406 msg = (_("template filter '%s' is not compatible with keyword '%s'")
406 msg = (_("template filter '%s' is not compatible with keyword '%s'")
407 % (filt.func_name, sym))
407 % (filt.func_name, sym))
408 else:
408 else:
409 msg = _("incompatible use of template filter '%s'") % filt.func_name
409 msg = _("incompatible use of template filter '%s'") % filt.func_name
410 raise error.Abort(msg)
410 raise error.Abort(msg)
411
411
412 def buildmap(exp, context):
412 def buildmap(exp, context):
413 func, data = compileexp(exp[1], context, methods)
413 func, data = compileexp(exp[1], context, methods)
414 tfunc, tdata = gettemplate(exp[2], context)
414 tfunc, tdata = gettemplate(exp[2], context)
415 return (runmap, (func, data, tfunc, tdata))
415 return (runmap, (func, data, tfunc, tdata))
416
416
def runmap(context, mapping, data):
    func, data, tfunc, tdata = data
    d = func(context, mapping, data)
    if util.safehasattr(d, 'itermaps'):
        diter = d.itermaps()
    else:
        try:
            diter = iter(d)
        except TypeError:
            if func is runsymbol:
                raise error.ParseError(_("keyword '%s' is not iterable") % data)
            else:
                raise error.ParseError(_("%r is not iterable") % d)

    for i, v in enumerate(diter):
        lm = mapping.copy()
        lm['index'] = i
        if isinstance(v, dict):
            lm.update(v)
            lm['originalnode'] = mapping.get('node')
            yield tfunc(context, lm, tdata)
        else:
            # v is not an iterable of dicts; this happens when 'key'
            # has been fully expanded already and format is useless.
            # If so, return the expanded value.
            yield v

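# Illustrative usage (a sketch, assuming standard changeset keywords such as
# "files"): the "%" operator that runmap() implements expands a template once
# per item, e.g.
#
#   $ hg log -r . -T '{files % "{file}\n"}'
#
# and each expansion also sees "{index}", the zero-based item position.
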
def buildnegate(exp, context):
    arg = compileexp(exp[1], context, exprmethods)
    return (runnegate, arg)

def runnegate(context, mapping, data):
    data = evalinteger(context, mapping, data,
                       _('negation needs an integer argument'))
    return -data

def buildarithmetic(exp, context, func):
    left = compileexp(exp[1], context, exprmethods)
    right = compileexp(exp[2], context, exprmethods)
    return (runarithmetic, (func, left, right))

def runarithmetic(context, mapping, data):
    func, left, right = data
    left = evalinteger(context, mapping, left,
                       _('arithmetic only defined on integers'))
    right = evalinteger(context, mapping, right,
                        _('arithmetic only defined on integers'))
    try:
        return func(left, right)
    except ZeroDivisionError:
        raise error.Abort(_('division by zero is not defined'))

def buildfunc(exp, context):
    n = getsymbol(exp[1])
    if n in funcs:
        f = funcs[n]
        args = _buildfuncargs(exp[2], context, exprmethods, n, f._argspec)
        return (f, args)
    if n in context._filters:
        args = _buildfuncargs(exp[2], context, exprmethods, n, argspec=None)
        if len(args) != 1:
            raise error.ParseError(_("filter %s expects one argument") % n)
        f = context._filters[n]
        return (runfilter, (args[0], f))
    raise error.ParseError(_("unknown function '%s'") % n)

def _buildfuncargs(exp, context, curmethods, funcname, argspec):
    """Compile parsed tree of function arguments into list or dict of
    (func, data) pairs

    >>> context = engine(lambda t: (runsymbol, t))
    >>> def fargs(expr, argspec):
    ...     x = _parseexpr(expr)
    ...     n = getsymbol(x[1])
    ...     return _buildfuncargs(x[2], context, exprmethods, n, argspec)
    >>> list(fargs(b'a(l=1, k=2)', b'k l m').keys())
    ['l', 'k']
    >>> args = fargs(b'a(opts=1, k=2)', b'**opts')
    >>> list(args.keys()), list(args[b'opts'].keys())
    (['opts'], ['opts', 'k'])
    """
    def compiledict(xs):
        return util.sortdict((k, compileexp(x, context, curmethods))
                             for k, x in xs.iteritems())
    def compilelist(xs):
        return [compileexp(x, context, curmethods) for x in xs]

    if not argspec:
        # filter or function with no argspec: return list of positional args
        return compilelist(getlist(exp))

    # function with argspec: return dict of named args
    _poskeys, varkey, _keys, optkey = argspec = parser.splitargspec(argspec)
    treeargs = parser.buildargsdict(getlist(exp), funcname, argspec,
                                    keyvaluenode='keyvalue', keynode='symbol')
    compargs = util.sortdict()
    if varkey:
        compargs[varkey] = compilelist(treeargs.pop(varkey))
    if optkey:
        compargs[optkey] = compiledict(treeargs.pop(optkey))
    compargs.update(compiledict(treeargs))
    return compargs

def buildkeyvaluepair(exp, content):
    raise error.ParseError(_("can't use a key-value pair in this context"))

# dict of template built-in functions
funcs = {}

templatefunc = registrar.templatefunc(funcs)

@templatefunc('date(date[, fmt])')
def date(context, mapping, args):
    """Format a date. See :hg:`help dates` for formatting
    strings. The default is a Unix date format, including the timezone:
    "Mon Sep 04 15:13:13 2006 0700"."""
    if not (1 <= len(args) <= 2):
        # i18n: "date" is a keyword
        raise error.ParseError(_("date expects one or two arguments"))

    date = evalfuncarg(context, mapping, args[0])
    fmt = None
    if len(args) == 2:
        fmt = evalstring(context, mapping, args[1])
    try:
        if fmt is None:
            return util.datestr(date)
        else:
            return util.datestr(date, fmt)
    except (TypeError, ValueError):
        # i18n: "date" is a keyword
        raise error.ParseError(_("date expects a date"))

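# Illustrative usage (a sketch, assuming the default changeset mapping
# provides "date" as a (unixtime, tzoffset) pair):
#
#   $ hg log -r . -T '{date(date, "%Y-%m-%d")}\n'
#
# would render the docstring's example date as "2006-09-04".
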
@templatefunc('dict([[key=]value...])', argspec='*args **kwargs')
def dict_(context, mapping, args):
    """Construct a dict from key-value pairs. A key may be omitted if
    a value expression can provide an unambiguous name."""
    data = util.sortdict()

    for v in args['args']:
        k = findsymbolicname(v)
        if not k:
            raise error.ParseError(_('dict key cannot be inferred'))
        if k in data or k in args['kwargs']:
            raise error.ParseError(_("duplicated dict key '%s' inferred") % k)
        data[k] = evalfuncarg(context, mapping, v)

    data.update((k, evalfuncarg(context, mapping, v))
                for k, v in args['kwargs'].iteritems())
    return templatekw.hybriddict(data)

@templatefunc('diff([includepattern [, excludepattern]])')
def diff(context, mapping, args):
    """Show a diff, optionally
    specifying files to include or exclude."""
    if len(args) > 2:
        # i18n: "diff" is a keyword
        raise error.ParseError(_("diff expects zero, one, or two arguments"))

    def getpatterns(i):
        if i < len(args):
            s = evalstring(context, mapping, args[i]).strip()
            if s:
                return [s]
        return []

    ctx = mapping['ctx']
    chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1)))

    return ''.join(chunks)

@templatefunc('files(pattern)')
def files(context, mapping, args):
    """All files of the current changeset matching the pattern. See
    :hg:`help patterns`."""
    if not len(args) == 1:
        # i18n: "files" is a keyword
        raise error.ParseError(_("files expects one argument"))

    raw = evalstring(context, mapping, args[0])
    ctx = mapping['ctx']
    m = ctx.match([raw])
    files = list(ctx.matches(m))
    return templatekw.showlist("file", files, mapping)

@templatefunc('fill(text[, width[, initialindent[, hangindent]]])')
def fill(context, mapping, args):
    """Fill many
    paragraphs with optional indentation. See the "fill" filter."""
    if not (1 <= len(args) <= 4):
        # i18n: "fill" is a keyword
        raise error.ParseError(_("fill expects one to four arguments"))

    text = evalstring(context, mapping, args[0])
    width = 76
    initindent = ''
    hangindent = ''
    if 2 <= len(args) <= 4:
        width = evalinteger(context, mapping, args[1],
                            # i18n: "fill" is a keyword
                            _("fill expects an integer width"))
        try:
            initindent = evalstring(context, mapping, args[2])
            hangindent = evalstring(context, mapping, args[3])
        except IndexError:
            pass

    return templatefilters.fill(text, width, initindent, hangindent)

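# Illustrative usage (a sketch): rewrap a long commit description to 60
# columns, indenting continuation lines of each paragraph by two spaces:
#
#   $ hg log -r . -T '{fill(desc, 60, "", "  ")}\n'
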
@templatefunc('formatnode(node)')
def formatnode(context, mapping, args):
    """Obtain the preferred form of a changeset hash. (DEPRECATED)"""
    if len(args) != 1:
        # i18n: "formatnode" is a keyword
        raise error.ParseError(_("formatnode expects one argument"))

    ui = mapping['ui']
    node = evalstring(context, mapping, args[0])
    if ui.debugflag:
        return node
    return templatefilters.short(node)

@templatefunc('pad(text, width[, fillchar=\' \'[, left=False]])',
              argspec='text width fillchar left')
def pad(context, mapping, args):
    """Pad text with a
    fill character."""
    if 'text' not in args or 'width' not in args:
        # i18n: "pad" is a keyword
        raise error.ParseError(_("pad() expects two to four arguments"))

    width = evalinteger(context, mapping, args['width'],
                        # i18n: "pad" is a keyword
                        _("pad() expects an integer width"))

    text = evalstring(context, mapping, args['text'])

    left = False
    fillchar = ' '
    if 'fillchar' in args:
        fillchar = evalstring(context, mapping, args['fillchar'])
        if len(color.stripeffects(fillchar)) != 1:
            # i18n: "pad" is a keyword
            raise error.ParseError(_("pad() expects a single fill character"))
    if 'left' in args:
        left = evalboolean(context, mapping, args['left'])

    fillwidth = width - encoding.colwidth(color.stripeffects(text))
    if fillwidth <= 0:
        return text
    if left:
        return fillchar * fillwidth + text
    else:
        return text + fillchar * fillwidth

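# Illustrative usage (a sketch): "{pad(rev, 8)}" left-aligns the revision
# number in an eight-column field, while "{pad(rev, 8, left=True)}" pads on
# the left instead; color effects are stripped before measuring the width.
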
@templatefunc('indent(text, indentchars[, firstline])')
def indent(context, mapping, args):
    """Indents all non-empty lines
    with the characters given in the indentchars string. An optional
    third parameter will override the indent for the first line only
    if present."""
    if not (2 <= len(args) <= 3):
        # i18n: "indent" is a keyword
        raise error.ParseError(_("indent() expects two or three arguments"))

    text = evalstring(context, mapping, args[0])
    indent = evalstring(context, mapping, args[1])

    if len(args) == 3:
        firstline = evalstring(context, mapping, args[2])
    else:
        firstline = indent

    # the indent function doesn't indent the first line, so we do it here
    return templatefilters.indent(firstline + text, indent)

@templatefunc('get(dict, key)')
def get(context, mapping, args):
    """Get an attribute/key from an object. Some keywords
    are complex types. This function allows you to obtain the value of an
    attribute on these types."""
    if len(args) != 2:
        # i18n: "get" is a keyword
        raise error.ParseError(_("get() expects two arguments"))

    dictarg = evalfuncarg(context, mapping, args[0])
    if not util.safehasattr(dictarg, 'get'):
        # i18n: "get" is a keyword
        raise error.ParseError(_("get() expects a dict as first argument"))

    key = evalfuncarg(context, mapping, args[1])
    return dictarg.get(key)

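# Illustrative usage (a sketch, assuming the dict-like "extras" keyword):
# "{get(extras, 'branch')}" looks up the 'branch' entry; a missing key
# yields None, which the engine renders as the empty string.
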
@templatefunc('if(expr, then[, else])')
def if_(context, mapping, args):
    """Conditionally execute based on the result of
    an expression."""
    if not (2 <= len(args) <= 3):
        # i18n: "if" is a keyword
        raise error.ParseError(_("if expects two or three arguments"))

    test = evalboolean(context, mapping, args[0])
    if test:
        yield args[1][0](context, mapping, args[1][1])
    elif len(args) == 3:
        yield args[2][0](context, mapping, args[2][1])

@templatefunc('ifcontains(needle, haystack, then[, else])')
def ifcontains(context, mapping, args):
    """Conditionally execute based
    on whether the item "needle" is in "haystack"."""
    if not (3 <= len(args) <= 4):
        # i18n: "ifcontains" is a keyword
        raise error.ParseError(_("ifcontains expects three or four arguments"))

    needle = evalstring(context, mapping, args[0])
    haystack = evalfuncarg(context, mapping, args[1])

    if needle in haystack:
        yield args[2][0](context, mapping, args[2][1])
    elif len(args) == 4:
        yield args[3][0](context, mapping, args[3][1])

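# Illustrative usage (a sketch): mark the working-directory parent in a
# graph log by testing membership in a revset:
#
#   $ hg log --graph -T '{ifcontains(rev, revset("."), "@", "o")}'
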
@templatefunc('ifeq(expr1, expr2, then[, else])')
def ifeq(context, mapping, args):
    """Conditionally execute based on
    whether 2 items are equivalent."""
    if not (3 <= len(args) <= 4):
        # i18n: "ifeq" is a keyword
        raise error.ParseError(_("ifeq expects three or four arguments"))

    test = evalstring(context, mapping, args[0])
    match = evalstring(context, mapping, args[1])
    if test == match:
        yield args[2][0](context, mapping, args[2][1])
    elif len(args) == 4:
        yield args[3][0](context, mapping, args[3][1])

@templatefunc('join(list, sep)')
def join(context, mapping, args):
    """Join items in a list with a delimiter."""
    if not (1 <= len(args) <= 2):
        # i18n: "join" is a keyword
        raise error.ParseError(_("join expects one or two arguments"))

    joinset = args[0][0](context, mapping, args[0][1])
    if util.safehasattr(joinset, 'itermaps'):
        jf = joinset.joinfmt
        joinset = [jf(x) for x in joinset.itermaps()]

    joiner = " "
    if len(args) > 1:
        joiner = evalstring(context, mapping, args[1])

    first = True
    for x in joinset:
        if first:
            first = False
        else:
            yield joiner
        yield x

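# Illustrative usage (a sketch): "{join(files, ', ')}" renders the changed
# files as one comma-separated line; the separator defaults to a single
# space when omitted.
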
@templatefunc('label(label, expr)')
def label(context, mapping, args):
    """Apply a label to generated content. Content with
    a label applied can result in additional post-processing, such as
    automatic colorization."""
    if len(args) != 2:
        # i18n: "label" is a keyword
        raise error.ParseError(_("label expects two arguments"))

    ui = mapping['ui']
    thing = evalstring(context, mapping, args[1])
    # preserve unknown symbol as literal so effects like 'red', 'bold',
    # etc. don't need to be quoted
    label = evalstringliteral(context, mapping, args[0])

    return ui.label(thing, label)

@templatefunc('latesttag([pattern])')
def latesttag(context, mapping, args):
    """The global tags matching the given pattern on the
    most recent globally tagged ancestor of this changeset.
    If no such tags exist, the "{tag}" template resolves to
    the string "null"."""
    if len(args) > 1:
        # i18n: "latesttag" is a keyword
        raise error.ParseError(_("latesttag expects at most one argument"))

    pattern = None
    if len(args) == 1:
        pattern = evalstring(context, mapping, args[0])

    return templatekw.showlatesttags(pattern, **mapping)

@templatefunc('localdate(date[, tz])')
def localdate(context, mapping, args):
    """Converts a date to the specified timezone.
    The default is local date."""
    if not (1 <= len(args) <= 2):
        # i18n: "localdate" is a keyword
        raise error.ParseError(_("localdate expects one or two arguments"))

    date = evalfuncarg(context, mapping, args[0])
    try:
        date = util.parsedate(date)
    except AttributeError: # not str nor date tuple
        # i18n: "localdate" is a keyword
        raise error.ParseError(_("localdate expects a date"))
    if len(args) >= 2:
        tzoffset = None
        tz = evalfuncarg(context, mapping, args[1])
        if isinstance(tz, str):
            tzoffset, remainder = util.parsetimezone(tz)
            if remainder:
                tzoffset = None
        if tzoffset is None:
            try:
                tzoffset = int(tz)
            except (TypeError, ValueError):
                # i18n: "localdate" is a keyword
                raise error.ParseError(_("localdate expects a timezone"))
    else:
        tzoffset = util.makedate()[1]
    return (date[0], tzoffset)

@templatefunc('max(iterable)')
def max_(context, mapping, args, **kwargs):
    """Return the max of an iterable"""
    if len(args) != 1:
        # i18n: "max" is a keyword
        raise error.ParseError(_("max expects one argument"))

    iterable = evalfuncarg(context, mapping, args[0])
    try:
        return max(iterable)
    except (TypeError, ValueError):
        # i18n: "max" is a keyword
        raise error.ParseError(_("max first argument should be an iterable"))

@templatefunc('min(iterable)')
def min_(context, mapping, args, **kwargs):
    """Return the min of an iterable"""
    if len(args) != 1:
        # i18n: "min" is a keyword
        raise error.ParseError(_("min expects one argument"))

    iterable = evalfuncarg(context, mapping, args[0])
    try:
        return min(iterable)
    except (TypeError, ValueError):
        # i18n: "min" is a keyword
        raise error.ParseError(_("min first argument should be an iterable"))

@templatefunc('mod(a, b)')
def mod(context, mapping, args):
    """Calculate a mod b such that (a / b) * b + (a mod b) == a"""
    if not len(args) == 2:
        # i18n: "mod" is a keyword
        raise error.ParseError(_("mod expects two arguments"))

    func = lambda a, b: a % b
    return runarithmetic(context, mapping, (func, args[0], args[1]))

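# Worked example (a sketch): the template "/" operator compiles to Python's
# floor division (see exprmethods below), and mod() matches it: with a = -7
# and b = 3, a / b gives -3 and mod(-7, 3) gives 2, so (-3) * 3 + 2 == -7,
# satisfying the docstring's identity.
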
@templatefunc('obsfatedate(markers)')
def obsfatedate(context, mapping, args):
    """Compute obsfate related information based on markers (EXPERIMENTAL)"""
    if len(args) != 1:
        # i18n: "obsfatedate" is a keyword
        raise error.ParseError(_("obsfatedate expects one argument"))

    markers = evalfuncarg(context, mapping, args[0])

    try:
        data = obsutil.markersdates(markers)
        return templatekw.hybridlist(data, name='date', fmt='%d %d')
    except (TypeError, KeyError):
        # i18n: "obsfatedate" is a keyword
        errmsg = _("obsfatedate first argument should be an iterable")
        raise error.ParseError(errmsg)

@templatefunc('obsfateusers(markers)')
def obsfateusers(context, mapping, args):
    """Compute obsfate related information based on markers (EXPERIMENTAL)"""
    if len(args) != 1:
        # i18n: "obsfateusers" is a keyword
        raise error.ParseError(_("obsfateusers expects one argument"))

    markers = evalfuncarg(context, mapping, args[0])

    try:
        data = obsutil.markersusers(markers)
        return templatekw.hybridlist(data, name='user')
    except (TypeError, KeyError, ValueError):
        # i18n: "obsfateusers" is a keyword
        msg = _("obsfateusers first argument should be an iterable of "
                "obsmarkers")
        raise error.ParseError(msg)

@templatefunc('obsfateverb(successors)')
def obsfateverb(context, mapping, args):
    """Compute obsfate related information based on successors (EXPERIMENTAL)"""
    if len(args) != 1:
        # i18n: "obsfateverb" is a keyword
        raise error.ParseError(_("obsfateverb expects one argument"))

    successors = evalfuncarg(context, mapping, args[0])

    try:
        return obsutil.successorsetverb(successors)
    except TypeError:
        # i18n: "obsfateverb" is a keyword
        errmsg = _("obsfateverb first argument should be countable")
        raise error.ParseError(errmsg)

@templatefunc('relpath(path)')
def relpath(context, mapping, args):
    """Convert a repository-absolute path into a filesystem path relative to
    the current working directory."""
    if len(args) != 1:
        # i18n: "relpath" is a keyword
        raise error.ParseError(_("relpath expects one argument"))

    repo = mapping['ctx'].repo()
    path = evalstring(context, mapping, args[0])
    return repo.pathto(path)

@templatefunc('revset(query[, formatargs...])')
def revset(context, mapping, args):
    """Execute a revision set query. See
    :hg:`help revset`."""
    if not len(args) > 0:
        # i18n: "revset" is a keyword
        raise error.ParseError(_("revset expects one or more arguments"))

    raw = evalstring(context, mapping, args[0])
    ctx = mapping['ctx']
    repo = ctx.repo()

    def query(expr):
        m = revsetmod.match(repo.ui, expr, repo=repo)
        return m(repo)

    if len(args) > 1:
        formatargs = [evalfuncarg(context, mapping, a) for a in args[1:]]
        revs = query(revsetlang.formatspec(raw, *formatargs))
        revs = list(revs)
    else:
        revsetcache = mapping['cache'].setdefault("revsetcache", {})
        if raw in revsetcache:
            revs = revsetcache[raw]
        else:
            revs = query(raw)
            revs = list(revs)
            revsetcache[raw] = revs

    return templatekw.showrevslist("revision", revs, **mapping)

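# Illustrative usage (a sketch): extra arguments are interpolated into the
# query via revsetlang.formatspec(), so "{revset('parents(%d)', rev)}"
# evaluates the parents of each listed revision; argument-free queries are
# memoized in the per-template "revsetcache".
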
@templatefunc('rstdoc(text, style)')
def rstdoc(context, mapping, args):
    """Format reStructuredText."""
    if len(args) != 2:
        # i18n: "rstdoc" is a keyword
        raise error.ParseError(_("rstdoc expects two arguments"))

    text = evalstring(context, mapping, args[0])
    style = evalstring(context, mapping, args[1])

    return minirst.format(text, style=style, keep=['verbose'])

@templatefunc('separate(sep, args)', argspec='sep *args')
def separate(context, mapping, args):
    """Add a separator between non-empty arguments."""
    if 'sep' not in args:
        # i18n: "separate" is a keyword
        raise error.ParseError(_("separate expects at least one argument"))

    sep = evalstring(context, mapping, args['sep'])
    first = True
    for arg in args['args']:
        argstr = evalstring(context, mapping, arg)
        if not argstr:
            continue
        if first:
            first = False
        else:
            yield sep
        yield argstr

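# Illustrative usage (a sketch): unlike join(), empty values are skipped
# entirely, so "{separate(' ', node|short, bookmarks, tags)}" emits no
# doubled spaces when a changeset has no bookmarks or tags.
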
@templatefunc('shortest(node, minlength=4)')
def shortest(context, mapping, args):
    """Obtain the shortest representation of
    a node."""
    if not (1 <= len(args) <= 2):
        # i18n: "shortest" is a keyword
        raise error.ParseError(_("shortest() expects one or two arguments"))

    node = evalstring(context, mapping, args[0])

    minlength = 4
    if len(args) > 1:
        minlength = evalinteger(context, mapping, args[1],
                                # i18n: "shortest" is a keyword
                                _("shortest() expects an integer minlength"))

    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.
    cl = mapping['ctx']._repo.unfiltered().changelog
    return cl.shortest(node, minlength)

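# Illustrative usage (a sketch): "{shortest(node, 6)}" asks for the shortest
# unambiguous prefix of the changeset hash that is at least six characters
# long; the prefix search itself is delegated to cl.shortest().
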
@templatefunc('strip(text[, chars])')
def strip(context, mapping, args):
    """Strip characters from a string. By default,
    strips all leading and trailing whitespace."""
    if not (1 <= len(args) <= 2):
        # i18n: "strip" is a keyword
        raise error.ParseError(_("strip expects one or two arguments"))

    text = evalstring(context, mapping, args[0])
    if len(args) == 2:
        chars = evalstring(context, mapping, args[1])
        return text.strip(chars)
    return text.strip()

@templatefunc('sub(pattern, replacement, expression)')
def sub(context, mapping, args):
    """Perform text substitution
    using regular expressions."""
    if len(args) != 3:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub expects three arguments"))

    pat = evalstring(context, mapping, args[0])
    rpl = evalstring(context, mapping, args[1])
    src = evalstring(context, mapping, args[2])
    try:
        patre = re.compile(pat)
    except re.error:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub got an invalid pattern: %s") % pat)
    try:
        yield patre.sub(rpl, src)
    except re.error:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub got an invalid replacement: %s") % rpl)

@templatefunc('startswith(pattern, text)')
def startswith(context, mapping, args):
    """Returns the value from the "text" argument
    if it begins with the content from the "pattern" argument."""
    if len(args) != 2:
        # i18n: "startswith" is a keyword
        raise error.ParseError(_("startswith expects two arguments"))

    patn = evalstring(context, mapping, args[0])
    text = evalstring(context, mapping, args[1])
    if text.startswith(patn):
        return text
    return ''

@templatefunc('word(number, text[, separator])')
def word(context, mapping, args):
    """Return the nth word from a string."""
    if not (2 <= len(args) <= 3):
        # i18n: "word" is a keyword
        raise error.ParseError(_("word expects two or three arguments, got %d")
                               % len(args))

    num = evalinteger(context, mapping, args[0],
                      # i18n: "word" is a keyword
                      _("word expects an integer index"))
    text = evalstring(context, mapping, args[1])
    if len(args) == 3:
        splitter = evalstring(context, mapping, args[2])
    else:
        splitter = None

    tokens = text.split(splitter)
    if num >= len(tokens) or num < -len(tokens):
        return ''
    else:
        return tokens[num]

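# Illustrative usage (a sketch): "{word(0, author)}" returns the first
# whitespace-separated token of the author field; a negative index counts
# from the end, and an out-of-range index yields the empty string.
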
# methods to interpret function arguments or inner expressions (e.g. {_(x)})
exprmethods = {
    "integer": lambda e, c: (runinteger, e[1]),
    "string": lambda e, c: (runstring, e[1]),
    "symbol": lambda e, c: (runsymbol, e[1]),
    "template": buildtemplate,
    "group": lambda e, c: compileexp(e[1], c, exprmethods),
    # ".": buildmember,
    "|": buildfilter,
    "%": buildmap,
    "func": buildfunc,
    "keyvalue": buildkeyvaluepair,
    "+": lambda e, c: buildarithmetic(e, c, lambda a, b: a + b),
    "-": lambda e, c: buildarithmetic(e, c, lambda a, b: a - b),
    "negate": buildnegate,
    "*": lambda e, c: buildarithmetic(e, c, lambda a, b: a * b),
    "/": lambda e, c: buildarithmetic(e, c, lambda a, b: a // b),
    }

# methods to interpret top-level template (e.g. {x}, {x|_}, {x % "y"})
methods = exprmethods.copy()
methods["integer"] = exprmethods["symbol"] # '{1}' as variable

class _aliasrules(parser.basealiasrules):
    """Parsing and expansion rule set of template aliases"""
    _section = _('template alias')
    _parse = staticmethod(_parseexpr)

    @staticmethod
    def _trygetfunc(tree):
        """Return (name, args) if tree is func(...) or ...|filter; otherwise
        None"""
        if tree[0] == 'func' and tree[1][0] == 'symbol':
            return tree[1][1], getlist(tree[2])
        if tree[0] == '|' and tree[2][0] == 'symbol':
            return tree[2][1], [tree[1]]

def expandaliases(tree, aliases):
    """Return a new tree in which aliases are expanded"""
    aliasmap = _aliasrules.buildmap(aliases)
    return _aliasrules.expand(aliasmap, tree)

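# Illustrative usage (a sketch): aliases come from the [templatealias]
# config section, e.g.
#
#   [templatealias]
#   sn = node|short
#
# after which "{sn}" anywhere in a template expands to "{node|short}".
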
# template engine

stringify = templatefilters.stringify

def _flatten(thing):
    '''yield a single stream from a possibly nested set of iterators'''
    thing = templatekw.unwraphybrid(thing)
    if isinstance(thing, bytes):
        yield thing
    elif thing is None:
        pass
    elif not util.safehasattr(thing, '__iter__'):
        yield pycompat.bytestr(thing)
    else:
        for i in thing:
            i = templatekw.unwraphybrid(i)
            if isinstance(i, bytes):
                yield i
            elif i is None:
                pass
            elif not util.safehasattr(i, '__iter__'):
                yield pycompat.bytestr(i)
            else:
                for j in _flatten(i):
                    yield j

def unquotestring(s):
    '''unwrap quotes if any; otherwise returns unmodified string'''
    if len(s) < 2 or s[0] not in "'\"" or s[0] != s[-1]:
        return s
    return s[1:-1]

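# A quick sanity check of the behavior (a sketch):
#
#   >>> unquotestring("'abc'")
#   'abc'
#   >>> unquotestring("abc'")   # mismatched quotes are left alone
#   "abc'"
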
class engine(object):
    '''template expansion engine.

    template expansion works like this. a map file contains key=value
    pairs. if value is quoted, it is treated as string. otherwise, it
    is treated as name of template file.

    templater is asked to expand a key in map. it looks up key, and
    looks for strings like this: {foo}. it expands {foo} by looking up
    foo in map, and substituting it. expansion is recursive: it stops
    when there is no more {foo} to replace.

    expansion also allows formatting and filtering.

    format uses key to expand each item in list. syntax is
    {key%format}.

    filter uses function to transform value. syntax is
    {key|filter1|filter2|...}.'''

    def __init__(self, loader, filters=None, defaults=None, aliases=()):
        self._loader = loader
        if filters is None:
            filters = {}
        self._filters = filters
        if defaults is None:
            defaults = {}
        self._defaults = defaults
        self._aliasmap = _aliasrules.buildmap(aliases)
        self._cache = {} # key: (func, data)

    def _load(self, t):
        '''load, parse, and cache a template'''
        if t not in self._cache:
            # put poison to cut recursion while compiling 't'
            self._cache[t] = (_runrecursivesymbol, t)
            try:
                x = parse(self._loader(t))
                if self._aliasmap:
                    x = _aliasrules.expand(self._aliasmap, x)
                self._cache[t] = compileexp(x, self, methods)
            except: # re-raises
                del self._cache[t]
                raise
        return self._cache[t]

    def process(self, t, mapping):
        '''Perform expansion. t is name of map element to expand.
        mapping contains added elements for use during expansion. Is a
        generator.'''
        func, data = self._load(t)
        return _flatten(func(self, mapping, data))

engines = {'default': engine}

1264 def stylelist():
1230 def stylelist():
1265 paths = templatepaths()
1231 paths = templatepaths()
1266 if not paths:
1232 if not paths:
1267 return _('no templates found, try `hg debuginstall` for more info')
1233 return _('no templates found, try `hg debuginstall` for more info')
1268 dirlist = os.listdir(paths[0])
1234 dirlist = os.listdir(paths[0])
1269 stylelist = []
1235 stylelist = []
1270 for file in dirlist:
1236 for file in dirlist:
1271 split = file.split(".")
1237 split = file.split(".")
1272 if split[-1] in ('orig', 'rej'):
1238 if split[-1] in ('orig', 'rej'):
1273 continue
1239 continue
1274 if split[0] == "map-cmdline":
1240 if split[0] == "map-cmdline":
1275 stylelist.append(split[1])
1241 stylelist.append(split[1])
1276 return ", ".join(sorted(stylelist))
1242 return ", ".join(sorted(stylelist))

def _readmapfile(mapfile):
    """Load template elements from the given map file"""
    if not os.path.exists(mapfile):
        raise error.Abort(_("style '%s' not found") % mapfile,
                          hint=_("available styles: %s") % stylelist())

    base = os.path.dirname(mapfile)
    conf = config.config(includepaths=templatepaths())
    conf.read(mapfile)

    cache = {}
    tmap = {}
    for key, val in conf[''].items():
        if not val:
            raise error.ParseError(_('missing value'), conf.source('', key))
        if val[0] in "'\"":
            if val[0] != val[-1]:
                raise error.ParseError(_('unmatched quotes'),
                                       conf.source('', key))
            cache[key] = unquotestring(val)
        elif key == "__base__":
            # treat as a pointer to a base class for this style
            path = util.normpath(os.path.join(base, val))

            # fallback check in template paths
            if not os.path.exists(path):
                for p in templatepaths():
                    p2 = util.normpath(os.path.join(p, val))
                    if os.path.isfile(p2):
                        path = p2
                        break
                    p3 = util.normpath(os.path.join(p2, "map"))
                    if os.path.isfile(p3):
                        path = p3
                        break

            bcache, btmap = _readmapfile(path)
            for k in bcache:
                if k not in cache:
                    cache[k] = bcache[k]
            for k in btmap:
                if k not in tmap:
                    tmap[k] = btmap[k]
        else:
            val = 'default', val
            if ':' in val[1]:
                val = val[1].split(':', 1)
            tmap[key] = val[0], os.path.join(base, val[1])
    return cache, tmap
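
# A hedged sketch of a map file this parser accepts (entries are invented):
#
#   __base__ = map-cmdline.default
#   changeset = "{rev}:{node|short} {desc|firstline}\n"
#   header = header.tmpl
#
# The quoted value becomes an inline template in 'cache'; header.tmpl is
# recorded in 'tmap' as a file to read relative to the map file; and
# __base__ merges in entries from another style without overriding local
# ones. An 'engine:file' value selects a non-default engine via the ':'
# split above.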

class TemplateNotFound(error.Abort):
    pass

class templater(object):

    def __init__(self, filters=None, defaults=None, cache=None, aliases=(),
                 minchunk=1024, maxchunk=65536):
        '''set up template engine.
        filters is dict of functions. each transforms a value into another.
        defaults is dict of default map definitions.
        aliases is list of alias (name, replacement) pairs.
        '''
        if filters is None:
            filters = {}
        if defaults is None:
            defaults = {}
        if cache is None:
            cache = {}
        self.cache = cache.copy()
        self.map = {}
        self.filters = templatefilters.filters.copy()
        self.filters.update(filters)
        self.defaults = defaults
        self._aliases = aliases
        self.minchunk, self.maxchunk = minchunk, maxchunk
        self.ecache = {}

    @classmethod
    def frommapfile(cls, mapfile, filters=None, defaults=None, cache=None,
                    minchunk=1024, maxchunk=65536):
        """Create templater from the specified map file"""
        t = cls(filters, defaults, cache, [], minchunk, maxchunk)
        cache, tmap = _readmapfile(mapfile)
        t.cache.update(cache)
        t.map = tmap
        return t
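
    # Usage sketch (the path is hypothetical):
    #   t = templater.frommapfile('/path/to/templates/map-cmdline.default')
    #   'changeset' in t  # -> True, via __contains__ below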

    def __contains__(self, key):
        return key in self.cache or key in self.map

    def load(self, t):
        '''Get the template for the given template name. Use a local cache.'''
        if t not in self.cache:
            try:
                self.cache[t] = util.readfile(self.map[t][1])
            except KeyError as inst:
                raise TemplateNotFound(_('"%s" not in template map') %
                                       inst.args[0])
            except IOError as inst:
                raise IOError(inst.args[0], _('template file %s: %s') %
                              (self.map[t][1], inst.args[1]))
        return self.cache[t]

    def render(self, mapping):
        """Render the default unnamed template and return result as string"""
        return stringify(self('', **mapping))

    def __call__(self, t, **mapping):
        mapping = pycompat.byteskwargs(mapping)
        ttype = t in self.map and self.map[t][0] or 'default'
        if ttype not in self.ecache:
            try:
                ecls = engines[ttype]
            except KeyError:
                raise error.Abort(_('invalid template engine: %s') % ttype)
            self.ecache[ttype] = ecls(self.load, self.filters, self.defaults,
                                      self._aliases)
        proc = self.ecache[ttype]

        stream = proc.process(t, mapping)
        if self.minchunk:
            stream = util.increasingchunks(stream, min=self.minchunk,
                                           max=self.maxchunk)
        return stream
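
    # A hedged example of driving a templater directly (values invented):
    #   for chunk in t('changeset', rev=0, desc='initial commit'):
    #       ...  # write each chunk to the output
    # render() above is the convenience wrapper that stringifies the
    # unnamed template '' for a single mapping.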

def templatepaths():
    '''return locations used for template files.'''
    pathsrel = ['templates']
    paths = [os.path.normpath(os.path.join(util.datapath, f))
             for f in pathsrel]
    return [p for p in paths if os.path.isdir(p)]

def templatepath(name):
    '''return location of template file. returns None if not found.'''
    for p in templatepaths():
        f = os.path.join(p, name)
        if os.path.exists(f):
            return f
    return None

def stylemap(styles, paths=None):
    """Return path to mapfile for a given style.

    Searches mapfile in the following locations:
    1. templatepath/style/map
    2. templatepath/map-style
    3. templatepath/map
    """

    if paths is None:
        paths = templatepaths()
    elif isinstance(paths, str):
        paths = [paths]

    if isinstance(styles, str):
        styles = [styles]

    for style in styles:
        # only plain name is allowed to honor template paths
        if (not style
            or style in (os.curdir, os.pardir)
            or pycompat.ossep in style
            or pycompat.osaltsep and pycompat.osaltsep in style):
            continue
        locations = [os.path.join(style, 'map'), 'map-' + style]
        locations.append('map')

        for path in paths:
            for location in locations:
                mapfile = os.path.join(path, location)
                if os.path.isfile(mapfile):
                    return style, mapfile

    raise RuntimeError("No hgweb templates found in %r" % paths)
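
# Sketch (paths are illustrative): stylemap('gitweb') returns
# ('gitweb', '<templatepath>/gitweb/map') when that mapfile exists, then
# falls back to 'map-gitweb' and finally a bare 'map' in each search path.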

def loadfunction(ui, extname, registrarobj):
    """Load template functions from the specified registrarobj"""
    for name, func in registrarobj._table.iteritems():
        funcs[name] = func
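
# A hedged sketch of the registrar side (the decorated function body is
# invented):
#   templatefunc = registrar.templatefunc()
#
#   @templatefunc('myfunc(arg)')
#   def myfunc(context, mapping, args):
#       ...
#
# An extension declaring such a table lets loadfunction() install 'myfunc'
# into the global 'funcs' map at load time.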

# tell hggettext to extract docstrings from these functions:
i18nfunctions = funcs.values()