py3: slice over bytes to prevent getting ascii values...
Pulkit Goyal
r36198:34e85044 default
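
The change is mechanical but easy to get wrong: under Python 3, indexing a bytes object returns an integer (the byte's ordinal value), while slicing returns a length-one bytes object; under Python 2 both forms return a one-character string. The sketch below is a minimal illustration of that difference and is not part of the patch.

    # Minimal illustration (not part of the patch): bytes indexing vs. slicing.
    pattern = b's|foo|bar|'

    pattern[1]      # Python 2: '|'    Python 3: 124 (an int)
    pattern[1:2]    # Python 2: '|'    Python 3: b'|' (still bytes)

    # Code that later treats the value as a string (re.escape, formatting,
    # writing it into a drawing buffer) therefore needs the sliced form to
    # behave identically on both versions.

Each hunk below replaces a single-character index with the equivalent one-character slice.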
@@ -1,478 +1,478 b''
1 1 # Revision graph generator for Mercurial
2 2 #
3 3 # Copyright 2008 Dirkjan Ochtman <dirkjan@ochtman.nl>
4 4 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """supports walking the history as DAGs suitable for graphical output
10 10
11 11 The most basic format we use is that of::
12 12
13 13 (id, type, data, [parentids])
14 14
15 15 The node and parent ids are arbitrary integers which identify a node in the
16 16 context of the graph returned. Type is a constant specifying the node type.
17 17 Data depends on type.
18 18 """
19 19
20 20 from __future__ import absolute_import
21 21
22 22 from .node import nullrev
23 23 from . import (
24 24 dagop,
25 25 smartset,
26 26 util,
27 27 )
28 28
29 29 CHANGESET = 'C'
30 30 PARENT = 'P'
31 31 GRANDPARENT = 'G'
32 32 MISSINGPARENT = 'M'
33 33 # Style of line to draw. None signals a line that ends and is removed at this
34 34 # point. A number prefix means only the last N characters of the current block
35 35 # will use that style, the rest will use the PARENT style. Add a - sign
36 36 # (so making N negative) and all but the first N characters use that style.
37 37 EDGES = {PARENT: '|', GRANDPARENT: ':', MISSINGPARENT: None}
38 38
39 39 def dagwalker(repo, revs):
40 40 """cset DAG generator yielding (id, CHANGESET, ctx, [parentinfo]) tuples
41 41
42 42 This generator function walks through revisions (which should be ordered
43 43 from higher to lower). It returns a tuple for each node.
44 44
45 45 Each parentinfo entry is a tuple with (edgetype, parentid), where edgetype
46 46 is one of PARENT, GRANDPARENT or MISSINGPARENT. The node and parent ids
47 47 are arbitrary integers which identify a node in the context of the graph
48 48 returned.
49 49
50 50 """
51 51 gpcache = {}
52 52
53 53 for rev in revs:
54 54 ctx = repo[rev]
55 55 # partition into parents in the rev set and missing parents, then
56 56 # augment the lists with markers, to inform graph drawing code about
57 57 # what kind of edge to draw between nodes.
58 58 pset = set(p.rev() for p in ctx.parents() if p.rev() in revs)
59 59 mpars = [p.rev() for p in ctx.parents()
60 60 if p.rev() != nullrev and p.rev() not in pset]
61 61 parents = [(PARENT, p) for p in sorted(pset)]
62 62
63 63 for mpar in mpars:
64 64 gp = gpcache.get(mpar)
65 65 if gp is None:
66 66 # precompute slow query as we know reachableroots() goes
67 67 # through all revs (issue4782)
68 68 if not isinstance(revs, smartset.baseset):
69 69 revs = smartset.baseset(revs)
70 70 gp = gpcache[mpar] = sorted(set(dagop.reachableroots(
71 71 repo, revs, [mpar])))
72 72 if not gp:
73 73 parents.append((MISSINGPARENT, mpar))
74 74 pset.add(mpar)
75 75 else:
76 76 parents.extend((GRANDPARENT, g) for g in gp if g not in pset)
77 77 pset.update(gp)
78 78
79 79 yield (ctx.rev(), CHANGESET, ctx, parents)
80 80
81 81 def nodes(repo, nodes):
82 82 """cset DAG generator yielding (id, CHANGESET, ctx, [parentids]) tuples
83 83
84 84 This generator function walks the given nodes. It only returns parents
85 85 that are in nodes, too.
86 86 """
87 87 include = set(nodes)
88 88 for node in nodes:
89 89 ctx = repo[node]
90 90 parents = set((PARENT, p.rev()) for p in ctx.parents()
91 91 if p.node() in include)
92 92 yield (ctx.rev(), CHANGESET, ctx, sorted(parents))
93 93
94 94 def colored(dag, repo):
95 95 """annotates a DAG with colored edge information
96 96
97 97 For each DAG node this function emits tuples::
98 98
99 99 (id, type, data, (col, color), [(col, nextcol, color)])
100 100
101 101 with the following new elements:
102 102
103 103 - Tuple (col, color) with column and color index for the current node
104 104 - A list of tuples indicating the edges between the current node and its
105 105 parents.
106 106 """
107 107 seen = []
108 108 colors = {}
109 109 newcolor = 1
110 110 config = {}
111 111
112 112 for key, val in repo.ui.configitems('graph'):
113 113 if '.' in key:
114 114 branch, setting = key.rsplit('.', 1)
115 115 # Validation
116 116 if setting == "width" and val.isdigit():
117 117 config.setdefault(branch, {})[setting] = int(val)
118 118 elif setting == "color" and val.isalnum():
119 119 config.setdefault(branch, {})[setting] = val
120 120
121 121 if config:
122 122 getconf = util.lrucachefunc(
123 123 lambda rev: config.get(repo[rev].branch(), {}))
124 124 else:
125 125 getconf = lambda rev: {}
126 126
127 127 for (cur, type, data, parents) in dag:
128 128
129 129 # Compute seen and next
130 130 if cur not in seen:
131 131 seen.append(cur) # new head
132 132 colors[cur] = newcolor
133 133 newcolor += 1
134 134
135 135 col = seen.index(cur)
136 136 color = colors.pop(cur)
137 137 next = seen[:]
138 138
139 139 # Add parents to next
140 140 addparents = [p for pt, p in parents if p not in next]
141 141 next[col:col + 1] = addparents
142 142
143 143 # Set colors for the parents
144 144 for i, p in enumerate(addparents):
145 145 if not i:
146 146 colors[p] = color
147 147 else:
148 148 colors[p] = newcolor
149 149 newcolor += 1
150 150
151 151 # Add edges to the graph
152 152 edges = []
153 153 for ecol, eid in enumerate(seen):
154 154 if eid in next:
155 155 bconf = getconf(eid)
156 156 edges.append((
157 157 ecol, next.index(eid), colors[eid],
158 158 bconf.get('width', -1),
159 159 bconf.get('color', '')))
160 160 elif eid == cur:
161 161 for ptype, p in parents:
162 162 bconf = getconf(p)
163 163 edges.append((
164 164 ecol, next.index(p), color,
165 165 bconf.get('width', -1),
166 166 bconf.get('color', '')))
167 167
168 168 # Yield and move on
169 169 yield (cur, type, data, (col, color), edges)
170 170 seen = next
171 171
172 172 def asciiedges(type, char, state, rev, parents):
173 173 """adds edge info to changelog DAG walk suitable for ascii()"""
174 174 seen = state['seen']
175 175 if rev not in seen:
176 176 seen.append(rev)
177 177 nodeidx = seen.index(rev)
178 178
179 179 knownparents = []
180 180 newparents = []
181 181 for ptype, parent in parents:
182 182 if parent == rev:
183 183 # self reference (should only be seen in null rev)
184 184 continue
185 185 if parent in seen:
186 186 knownparents.append(parent)
187 187 else:
188 188 newparents.append(parent)
189 189 state['edges'][parent] = state['styles'].get(ptype, '|')
190 190
191 191 ncols = len(seen)
192 192 width = 1 + ncols * 2
193 193 nextseen = seen[:]
194 194 nextseen[nodeidx:nodeidx + 1] = newparents
195 195 edges = [(nodeidx, nextseen.index(p)) for p in knownparents]
196 196
197 197 seen[:] = nextseen
198 198 while len(newparents) > 2:
199 199 # ascii() only knows how to add or remove a single column between two
200 200 # calls. Nodes with more than two parents break this constraint so we
201 201 # introduce intermediate expansion lines to grow the active node list
202 202 # slowly.
203 203 edges.append((nodeidx, nodeidx))
204 204 edges.append((nodeidx, nodeidx + 1))
205 205 nmorecols = 1
206 206 width += 2
207 207 yield (type, char, width, (nodeidx, edges, ncols, nmorecols))
208 208 char = '\\'
209 209 nodeidx += 1
210 210 ncols += 1
211 211 edges = []
212 212 del newparents[0]
213 213
214 214 if len(newparents) > 0:
215 215 edges.append((nodeidx, nodeidx))
216 216 if len(newparents) > 1:
217 217 edges.append((nodeidx, nodeidx + 1))
218 218 nmorecols = len(nextseen) - ncols
219 219 if nmorecols > 0:
220 220 width += 2
221 221 # remove current node from edge characters, no longer needed
222 222 state['edges'].pop(rev, None)
223 223 yield (type, char, width, (nodeidx, edges, ncols, nmorecols))
224 224
225 225 def _fixlongrightedges(edges):
226 226 for (i, (start, end)) in enumerate(edges):
227 227 if end > start:
228 228 edges[i] = (start, end + 1)
229 229
230 230 def _getnodelineedgestail(
231 231 echars, idx, pidx, ncols, coldiff, pdiff, fix_tail):
232 232 if fix_tail and coldiff == pdiff and coldiff != 0:
233 233 # Still going in the same non-vertical direction.
234 234 if coldiff == -1:
235 235 start = max(idx + 1, pidx)
236 236 tail = echars[idx * 2:(start - 1) * 2]
237 237 tail.extend(["/", " "] * (ncols - start))
238 238 return tail
239 239 else:
240 240 return ["\\", " "] * (ncols - idx - 1)
241 241 else:
242 242 remainder = (ncols - idx - 1)
243 243 return echars[-(remainder * 2):] if remainder > 0 else []
244 244
245 245 def _drawedges(echars, edges, nodeline, interline):
246 246 for (start, end) in edges:
247 247 if start == end + 1:
248 248 interline[2 * end + 1] = "/"
249 249 elif start == end - 1:
250 250 interline[2 * start + 1] = "\\"
251 251 elif start == end:
252 252 interline[2 * start] = echars[2 * start]
253 253 else:
254 254 if 2 * end >= len(nodeline):
255 255 continue
256 256 nodeline[2 * end] = "+"
257 257 if start > end:
258 258 (start, end) = (end, start)
259 259 for i in range(2 * start + 1, 2 * end):
260 260 if nodeline[i] != "+":
261 261 nodeline[i] = "-"
262 262
263 263 def _getpaddingline(echars, idx, ncols, edges):
264 264 # all edges up to the current node
265 265 line = echars[:idx * 2]
266 266 # an edge for the current node, if there is one
267 267 if (idx, idx - 1) in edges or (idx, idx) in edges:
268 268 # (idx, idx - 1) (idx, idx)
269 269 # | | | | | | | |
270 270 # +---o | | o---+
271 271 # | | X | | X | |
272 272 # | |/ / | |/ /
273 273 # | | | | | |
274 274 line.extend(echars[idx * 2:(idx + 1) * 2])
275 275 else:
276 276 line.extend([' ', ' '])
277 277 # all edges to the right of the current node
278 278 remainder = ncols - idx - 1
279 279 if remainder > 0:
280 280 line.extend(echars[-(remainder * 2):])
281 281 return line
282 282
283 283 def _drawendinglines(lines, extra, edgemap, seen):
284 284 """Draw ending lines for missing parent edges
285 285
286 286 None indicates an edge that ends between this node and the next.
287 287 Replace with a short line ending in ~ and add / lines to any edges to
288 288 the right.
289 289
290 290 """
291 291 if None not in edgemap.values():
292 292 return
293 293
294 294 # Check for more edges to the right of our ending edges.
295 295 # We need enough space to draw adjustment lines for these.
296 296 edgechars = extra[::2]
297 297 while edgechars and edgechars[-1] is None:
298 298 edgechars.pop()
299 299 shift_size = max((edgechars.count(None) * 2) - 1, 0)
300 300 while len(lines) < 3 + shift_size:
301 301 lines.append(extra[:])
302 302
303 303 if shift_size:
304 304 empties = []
305 305 toshift = []
306 306 first_empty = extra.index(None)
307 307 for i, c in enumerate(extra[first_empty::2], first_empty // 2):
308 308 if c is None:
309 309 empties.append(i * 2)
310 310 else:
311 311 toshift.append(i * 2)
312 312 targets = list(range(first_empty, first_empty + len(toshift) * 2, 2))
313 313 positions = toshift[:]
314 314 for line in lines[-shift_size:]:
315 315 line[first_empty:] = [' '] * (len(line) - first_empty)
316 316 for i in range(len(positions)):
317 317 pos = positions[i] - 1
318 318 positions[i] = max(pos, targets[i])
319 319 line[pos] = '/' if pos > targets[i] else extra[toshift[i]]
320 320
321 321 map = {1: '|', 2: '~'}
322 322 for i, line in enumerate(lines):
323 323 if None not in line:
324 324 continue
325 325 line[:] = [c or map.get(i, ' ') for c in line]
326 326
327 327 # remove edges that ended
328 328 remove = [p for p, c in edgemap.items() if c is None]
329 329 for parent in remove:
330 330 del edgemap[parent]
331 331 seen.remove(parent)
332 332
333 333 def asciistate():
334 334 """returns the initial value for the "state" argument to ascii()"""
335 335 return {
336 336 'seen': [],
337 337 'edges': {},
338 338 'lastcoldiff': 0,
339 339 'lastindex': 0,
340 340 'styles': EDGES.copy(),
341 341 'graphshorten': False,
342 342 }
343 343
344 344 def ascii(ui, state, type, char, text, coldata):
345 345 """prints an ASCII graph of the DAG
346 346
347 347 takes the following arguments (one call per node in the graph):
348 348
349 349 - ui to write to
350 350 - Somewhere to keep the needed state in (init to asciistate())
351 351 - Column of the current node in the set of ongoing edges.
352 352 - Type indicator of node data, usually 'C' for changesets.
353 353 - Payload: (char, lines):
354 354 - Character to use as node's symbol.
355 355 - List of lines to display as the node's text.
356 356 - Edges; a list of (col, next_col) indicating the edges between
357 357 the current node and its parents.
358 358 - Number of columns (ongoing edges) in the current revision.
359 359 - The difference between the number of columns (ongoing edges)
360 360 in the next revision and the number of columns (ongoing edges)
361 361 in the current revision. That is: -1 means one column removed;
362 362 0 means no columns added or removed; 1 means one column added.
363 363 """
364 364 idx, edges, ncols, coldiff = coldata
365 365 assert -2 < coldiff < 2
366 366
367 367 edgemap, seen = state['edges'], state['seen']
368 368 # Be tolerant of history issues; make sure we have at least ncols + coldiff
369 369 # elements to work with. See test-glog.t for broken history test cases.
370 370 echars = [c for p in seen for c in (edgemap.get(p, '|'), ' ')]
371 371 echars.extend(('|', ' ') * max(ncols + coldiff - len(seen), 0))
372 372
373 373 if coldiff == -1:
374 374 # Transform
375 375 #
376 376 # | | | | | |
377 377 # o | | into o---+
378 378 # |X / |/ /
379 379 # | | | |
380 380 _fixlongrightedges(edges)
381 381
382 382 # add_padding_line says whether to rewrite
383 383 #
384 384 # | | | | | | | |
385 385 # | o---+ into | o---+
386 386 # | / / | | | # <--- padding line
387 387 # o | | | / /
388 388 # o | |
389 389 add_padding_line = (len(text) > 2 and coldiff == -1 and
390 390 [x for (x, y) in edges if x + 1 < y])
391 391
392 392 # fix_nodeline_tail says whether to rewrite
393 393 #
394 394 # | | o | | | | o | |
395 395 # | | |/ / | | |/ /
396 396 # | o | | into | o / / # <--- fixed nodeline tail
397 397 # | |/ / | |/ /
398 398 # o | | o | |
399 399 fix_nodeline_tail = len(text) <= 2 and not add_padding_line
400 400
401 401 # nodeline is the line containing the node character (typically o)
402 402 nodeline = echars[:idx * 2]
403 403 nodeline.extend([char, " "])
404 404
405 405 nodeline.extend(
406 406 _getnodelineedgestail(
407 407 echars, idx, state['lastindex'], ncols, coldiff,
408 408 state['lastcoldiff'], fix_nodeline_tail))
409 409
410 410 # shift_interline is the line containing the non-vertical
411 411 # edges between this entry and the next
412 412 shift_interline = echars[:idx * 2]
413 413 for i in xrange(2 + coldiff):
414 414 shift_interline.append(' ')
415 415 count = ncols - idx - 1
416 416 if coldiff == -1:
417 417 for i in xrange(count):
418 418 shift_interline.extend(['/', ' '])
419 419 elif coldiff == 0:
420 420 shift_interline.extend(echars[(idx + 1) * 2:ncols * 2])
421 421 else:
422 422 for i in xrange(count):
423 423 shift_interline.extend(['\\', ' '])
424 424
425 425 # draw edges from the current node to its parents
426 426 _drawedges(echars, edges, nodeline, shift_interline)
427 427
428 428 # lines is the list of all graph lines to print
429 429 lines = [nodeline]
430 430 if add_padding_line:
431 431 lines.append(_getpaddingline(echars, idx, ncols, edges))
432 432
433 433 # If 'graphshorten' config, only draw shift_interline
434 434 # when there is any non vertical flow in graph.
435 435 if state['graphshorten']:
436 436 if any(c in '\/' for c in shift_interline if c):
437 437 lines.append(shift_interline)
438 438 # Else, no 'graphshorten' config so draw shift_interline.
439 439 else:
440 440 lines.append(shift_interline)
441 441
442 442 # make sure that there are as many graph lines as there are
443 443 # log strings
444 444 extra_interline = echars[:(ncols + coldiff) * 2]
445 445 if len(lines) < len(text):
446 446 while len(lines) < len(text):
447 447 lines.append(extra_interline[:])
448 448
449 449 _drawendinglines(lines, extra_interline, edgemap, seen)
450 450
451 451 while len(text) < len(lines):
452 452 text.append("")
453 453
454 454 if any(len(char) > 1 for char in edgemap.values()):
455 455 # limit drawing an edge to the first or last N lines of the current
456 456 # section; the rest of the edge is drawn like a parent line.
457 parent = state['styles'][PARENT][-1]
457 parent = state['styles'][PARENT][-1:]
458 458 def _drawgp(char, i):
459 459 # should a grandparent character be drawn for this line?
460 460 if len(char) < 2:
461 461 return True
462 462 num = int(char[:-1])
463 463 # either skip first num lines or take last num lines, based on sign
464 464 return -num <= i if num < 0 else (len(lines) - i) <= num
465 465 for i, line in enumerate(lines):
466 line[:] = [c[-1] if _drawgp(c, i) else parent for c in line]
466 line[:] = [c[-1:] if _drawgp(c, i) else parent for c in line]
467 467 edgemap.update(
468 468 (e, (c if len(c) < 2 else parent)) for e, c in edgemap.items())
469 469
470 470 # print lines
471 471 indentation_level = max(ncols, ncols + coldiff)
472 472 for (line, logstr) in zip(lines, text):
473 473 ln = "%-*s %s" % (2 * indentation_level, "".join(line), logstr)
474 474 ui.write(ln.rstrip() + '\n')
475 475
476 476 # ... and start over
477 477 state['lastcoldiff'] = coldiff
478 478 state['lastindex'] = idx
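
In ascii() above, the values in state['styles'] come from EDGES (for example '|' and ':'), which are byte strings under Mercurial's Python 3 port. Taking their last character with [-1] would therefore yield an integer rather than a drawable character, so the patch uses [-1:], which stays a one-byte string on both Python versions; the same applies to c[-1] versus c[-1:] when rewriting each drawn line. A simplified sketch with stand-in values rather than the real state dict:

    # Simplified sketch; 'styles' stands in for state['styles'] and the values
    # mimic the byte-string edge markers derived from EDGES.
    styles = {b'P': b'|', b'G': b'2:'}

    styles[b'P'][-1]     # Python 3: 124 -- an int, unusable as a line character
    styles[b'P'][-1:]    # Python 3: b'|' -- what the drawing buffers expect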
@@ -1,654 +1,654 b''
1 1 # hgweb/webutil.py - utility library for the web interface.
2 2 #
3 3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import copy
12 12 import difflib
13 13 import os
14 14 import re
15 15
16 16 from ..i18n import _
17 17 from ..node import hex, nullid, short
18 18
19 19 from .common import (
20 20 ErrorResponse,
21 21 HTTP_BAD_REQUEST,
22 22 HTTP_NOT_FOUND,
23 23 paritygen,
24 24 )
25 25
26 26 from .. import (
27 27 context,
28 28 error,
29 29 match,
30 30 mdiff,
31 31 patch,
32 32 pathutil,
33 33 pycompat,
34 34 templatefilters,
35 35 templatekw,
36 36 ui as uimod,
37 37 util,
38 38 )
39 39
40 40 def up(p):
41 41 if p[0] != "/":
42 42 p = "/" + p
43 43 if p[-1] == "/":
44 44 p = p[:-1]
45 45 up = os.path.dirname(p)
46 46 if up == "/":
47 47 return "/"
48 48 return up + "/"
49 49
50 50 def _navseq(step, firststep=None):
51 51 if firststep:
52 52 yield firststep
53 53 if firststep >= 20 and firststep <= 40:
54 54 firststep = 50
55 55 yield firststep
56 56 assert step > 0
57 57 assert firststep > 0
58 58 while step <= firststep:
59 59 step *= 10
60 60 while True:
61 61 yield 1 * step
62 62 yield 3 * step
63 63 step *= 10
64 64
65 65 class revnav(object):
66 66
67 67 def __init__(self, repo):
68 68 """Navigation generation object
69 69
70 70 :repo: repo object we generate nav for
71 71 """
72 72 # used for hex generation
73 73 self._revlog = repo.changelog
74 74
75 75 def __nonzero__(self):
76 76 """return True if any revision to navigate over"""
77 77 return self._first() is not None
78 78
79 79 __bool__ = __nonzero__
80 80
81 81 def _first(self):
82 82 """return the minimum non-filtered changeset or None"""
83 83 try:
84 84 return next(iter(self._revlog))
85 85 except StopIteration:
86 86 return None
87 87
88 88 def hex(self, rev):
89 89 return hex(self._revlog.node(rev))
90 90
91 91 def gen(self, pos, pagelen, limit):
92 92 """computes label and revision id for navigation link
93 93
94 94 :pos: is the revision relative to which we generate navigation.
95 95 :pagelen: the size of each navigation page
96 96 :limit: how far shall we link
97 97
98 98 The return is:
99 99 - a single element tuple
100 100 - containing a dictionary with a `before` and `after` key
101 101 - values are generator functions taking arbitrary number of kwargs
102 102 - yield items are dictionaries with `label` and `node` keys
103 103 """
104 104 if not self:
105 105 # empty repo
106 106 return ({'before': (), 'after': ()},)
107 107
108 108 targets = []
109 109 for f in _navseq(1, pagelen):
110 110 if f > limit:
111 111 break
112 112 targets.append(pos + f)
113 113 targets.append(pos - f)
114 114 targets.sort()
115 115
116 116 first = self._first()
117 117 navbefore = [("(%i)" % first, self.hex(first))]
118 118 navafter = []
119 119 for rev in targets:
120 120 if rev not in self._revlog:
121 121 continue
122 122 if pos < rev < limit:
123 123 navafter.append(("+%d" % abs(rev - pos), self.hex(rev)))
124 124 if 0 < rev < pos:
125 125 navbefore.append(("-%d" % abs(rev - pos), self.hex(rev)))
126 126
127 127
128 128 navafter.append(("tip", "tip"))
129 129
130 130 data = lambda i: {"label": i[0], "node": i[1]}
131 131 return ({'before': lambda **map: (data(i) for i in navbefore),
132 132 'after': lambda **map: (data(i) for i in navafter)},)
133 133
134 134 class filerevnav(revnav):
135 135
136 136 def __init__(self, repo, path):
137 137 """Navigation generation object
138 138
139 139 :repo: repo object we generate nav for
140 140 :path: path of the file we generate nav for
141 141 """
142 142 # used for iteration
143 143 self._changelog = repo.unfiltered().changelog
144 144 # used for hex generation
145 145 self._revlog = repo.file(path)
146 146
147 147 def hex(self, rev):
148 148 return hex(self._changelog.node(self._revlog.linkrev(rev)))
149 149
150 150 class _siblings(object):
151 151 def __init__(self, siblings=None, hiderev=None):
152 152 if siblings is None:
153 153 siblings = []
154 154 self.siblings = [s for s in siblings if s.node() != nullid]
155 155 if len(self.siblings) == 1 and self.siblings[0].rev() == hiderev:
156 156 self.siblings = []
157 157
158 158 def __iter__(self):
159 159 for s in self.siblings:
160 160 d = {
161 161 'node': s.hex(),
162 162 'rev': s.rev(),
163 163 'user': s.user(),
164 164 'date': s.date(),
165 165 'description': s.description(),
166 166 'branch': s.branch(),
167 167 }
168 168 if util.safehasattr(s, 'path'):
169 169 d['file'] = s.path()
170 170 yield d
171 171
172 172 def __len__(self):
173 173 return len(self.siblings)
174 174
175 175 def difffeatureopts(req, ui, section):
176 176 diffopts = patch.difffeatureopts(ui, untrusted=True,
177 177 section=section, whitespace=True)
178 178
179 179 for k in ('ignorews', 'ignorewsamount', 'ignorewseol', 'ignoreblanklines'):
180 180 v = req.form.get(k, [None])[0]
181 181 if v is not None:
182 182 v = util.parsebool(v)
183 183 setattr(diffopts, k, v if v is not None else True)
184 184
185 185 return diffopts
186 186
187 187 def annotate(req, fctx, ui):
188 188 diffopts = difffeatureopts(req, ui, 'annotate')
189 189 return fctx.annotate(follow=True, linenumber=True, diffopts=diffopts)
190 190
191 191 def parents(ctx, hide=None):
192 192 if isinstance(ctx, context.basefilectx):
193 193 introrev = ctx.introrev()
194 194 if ctx.changectx().rev() != introrev:
195 195 return _siblings([ctx.repo()[introrev]], hide)
196 196 return _siblings(ctx.parents(), hide)
197 197
198 198 def children(ctx, hide=None):
199 199 return _siblings(ctx.children(), hide)
200 200
201 201 def renamelink(fctx):
202 202 r = fctx.renamed()
203 203 if r:
204 204 return [{'file': r[0], 'node': hex(r[1])}]
205 205 return []
206 206
207 207 def nodetagsdict(repo, node):
208 208 return [{"name": i} for i in repo.nodetags(node)]
209 209
210 210 def nodebookmarksdict(repo, node):
211 211 return [{"name": i} for i in repo.nodebookmarks(node)]
212 212
213 213 def nodebranchdict(repo, ctx):
214 214 branches = []
215 215 branch = ctx.branch()
216 216 # If this is an empty repo, ctx.node() == nullid,
217 217 # ctx.branch() == 'default'.
218 218 try:
219 219 branchnode = repo.branchtip(branch)
220 220 except error.RepoLookupError:
221 221 branchnode = None
222 222 if branchnode == ctx.node():
223 223 branches.append({"name": branch})
224 224 return branches
225 225
226 226 def nodeinbranch(repo, ctx):
227 227 branches = []
228 228 branch = ctx.branch()
229 229 try:
230 230 branchnode = repo.branchtip(branch)
231 231 except error.RepoLookupError:
232 232 branchnode = None
233 233 if branch != 'default' and branchnode != ctx.node():
234 234 branches.append({"name": branch})
235 235 return branches
236 236
237 237 def nodebranchnodefault(ctx):
238 238 branches = []
239 239 branch = ctx.branch()
240 240 if branch != 'default':
241 241 branches.append({"name": branch})
242 242 return branches
243 243
244 244 def showtag(repo, tmpl, t1, node=nullid, **args):
245 245 for t in repo.nodetags(node):
246 246 yield tmpl(t1, tag=t, **args)
247 247
248 248 def showbookmark(repo, tmpl, t1, node=nullid, **args):
249 249 for t in repo.nodebookmarks(node):
250 250 yield tmpl(t1, bookmark=t, **args)
251 251
252 252 def branchentries(repo, stripecount, limit=0):
253 253 tips = []
254 254 heads = repo.heads()
255 255 parity = paritygen(stripecount)
256 256 sortkey = lambda item: (not item[1], item[0].rev())
257 257
258 258 def entries(**map):
259 259 count = 0
260 260 if not tips:
261 261 for tag, hs, tip, closed in repo.branchmap().iterbranches():
262 262 tips.append((repo[tip], closed))
263 263 for ctx, closed in sorted(tips, key=sortkey, reverse=True):
264 264 if limit > 0 and count >= limit:
265 265 return
266 266 count += 1
267 267 if closed:
268 268 status = 'closed'
269 269 elif ctx.node() not in heads:
270 270 status = 'inactive'
271 271 else:
272 272 status = 'open'
273 273 yield {
274 274 'parity': next(parity),
275 275 'branch': ctx.branch(),
276 276 'status': status,
277 277 'node': ctx.hex(),
278 278 'date': ctx.date()
279 279 }
280 280
281 281 return entries
282 282
283 283 def cleanpath(repo, path):
284 284 path = path.lstrip('/')
285 285 return pathutil.canonpath(repo.root, '', path)
286 286
287 287 def changeidctx(repo, changeid):
288 288 try:
289 289 ctx = repo[changeid]
290 290 except error.RepoError:
291 291 man = repo.manifestlog._revlog
292 292 ctx = repo[man.linkrev(man.rev(man.lookup(changeid)))]
293 293
294 294 return ctx
295 295
296 296 def changectx(repo, req):
297 297 changeid = "tip"
298 298 if 'node' in req.form:
299 299 changeid = req.form['node'][0]
300 300 ipos = changeid.find(':')
301 301 if ipos != -1:
302 302 changeid = changeid[(ipos + 1):]
303 303 elif 'manifest' in req.form:
304 304 changeid = req.form['manifest'][0]
305 305
306 306 return changeidctx(repo, changeid)
307 307
308 308 def basechangectx(repo, req):
309 309 if 'node' in req.form:
310 310 changeid = req.form['node'][0]
311 311 ipos = changeid.find(':')
312 312 if ipos != -1:
313 313 changeid = changeid[:ipos]
314 314 return changeidctx(repo, changeid)
315 315
316 316 return None
317 317
318 318 def filectx(repo, req):
319 319 if 'file' not in req.form:
320 320 raise ErrorResponse(HTTP_NOT_FOUND, 'file not given')
321 321 path = cleanpath(repo, req.form['file'][0])
322 322 if 'node' in req.form:
323 323 changeid = req.form['node'][0]
324 324 elif 'filenode' in req.form:
325 325 changeid = req.form['filenode'][0]
326 326 else:
327 327 raise ErrorResponse(HTTP_NOT_FOUND, 'node or filenode not given')
328 328 try:
329 329 fctx = repo[changeid][path]
330 330 except error.RepoError:
331 331 fctx = repo.filectx(path, fileid=changeid)
332 332
333 333 return fctx
334 334
335 335 def linerange(req):
336 336 linerange = req.form.get('linerange')
337 337 if linerange is None:
338 338 return None
339 339 if len(linerange) > 1:
340 340 raise ErrorResponse(HTTP_BAD_REQUEST,
341 341 'redundant linerange parameter')
342 342 try:
343 343 fromline, toline = map(int, linerange[0].split(':', 1))
344 344 except ValueError:
345 345 raise ErrorResponse(HTTP_BAD_REQUEST,
346 346 'invalid linerange parameter')
347 347 try:
348 348 return util.processlinerange(fromline, toline)
349 349 except error.ParseError as exc:
350 350 raise ErrorResponse(HTTP_BAD_REQUEST, str(exc))
351 351
352 352 def formatlinerange(fromline, toline):
353 353 return '%d:%d' % (fromline + 1, toline)
354 354
355 355 def succsandmarkers(repo, ctx):
356 356 for item in templatekw.showsuccsandmarkers(repo, ctx):
357 357 item['successors'] = _siblings(repo[successor]
358 358 for successor in item['successors'])
359 359 yield item
360 360
361 361 def commonentry(repo, ctx):
362 362 node = ctx.node()
363 363 return {
364 364 'rev': ctx.rev(),
365 365 'node': hex(node),
366 366 'author': ctx.user(),
367 367 'desc': ctx.description(),
368 368 'date': ctx.date(),
369 369 'extra': ctx.extra(),
370 370 'phase': ctx.phasestr(),
371 371 'obsolete': ctx.obsolete(),
372 372 'succsandmarkers': lambda **x: succsandmarkers(repo, ctx),
373 373 'instabilities': [{"instability": i} for i in ctx.instabilities()],
374 374 'branch': nodebranchnodefault(ctx),
375 375 'inbranch': nodeinbranch(repo, ctx),
376 376 'branches': nodebranchdict(repo, ctx),
377 377 'tags': nodetagsdict(repo, node),
378 378 'bookmarks': nodebookmarksdict(repo, node),
379 379 'parent': lambda **x: parents(ctx),
380 380 'child': lambda **x: children(ctx),
381 381 }
382 382
383 383 def changelistentry(web, ctx, tmpl):
384 384 '''Obtain a dictionary to be used for entries in a changelist.
385 385
386 386 This function is called when producing items for the "entries" list passed
387 387 to the "shortlog" and "changelog" templates.
388 388 '''
389 389 repo = web.repo
390 390 rev = ctx.rev()
391 391 n = ctx.node()
392 392 showtags = showtag(repo, tmpl, 'changelogtag', n)
393 393 files = listfilediffs(tmpl, ctx.files(), n, web.maxfiles)
394 394
395 395 entry = commonentry(repo, ctx)
396 396 entry.update(
397 397 allparents=lambda **x: parents(ctx),
398 398 parent=lambda **x: parents(ctx, rev - 1),
399 399 child=lambda **x: children(ctx, rev + 1),
400 400 changelogtag=showtags,
401 401 files=files,
402 402 )
403 403 return entry
404 404
405 405 def symrevorshortnode(req, ctx):
406 406 if 'node' in req.form:
407 407 return templatefilters.revescape(req.form['node'][0])
408 408 else:
409 409 return short(ctx.node())
410 410
411 411 def changesetentry(web, req, tmpl, ctx):
412 412 '''Obtain a dictionary to be used to render the "changeset" template.'''
413 413
414 414 showtags = showtag(web.repo, tmpl, 'changesettag', ctx.node())
415 415 showbookmarks = showbookmark(web.repo, tmpl, 'changesetbookmark',
416 416 ctx.node())
417 417 showbranch = nodebranchnodefault(ctx)
418 418
419 419 files = []
420 420 parity = paritygen(web.stripecount)
421 421 for blockno, f in enumerate(ctx.files()):
422 422 template = 'filenodelink' if f in ctx else 'filenolink'
423 423 files.append(tmpl(template,
424 424 node=ctx.hex(), file=f, blockno=blockno + 1,
425 425 parity=next(parity)))
426 426
427 427 basectx = basechangectx(web.repo, req)
428 428 if basectx is None:
429 429 basectx = ctx.p1()
430 430
431 431 style = web.config('web', 'style')
432 432 if 'style' in req.form:
433 433 style = req.form['style'][0]
434 434
435 435 diff = diffs(web, tmpl, ctx, basectx, None, style)
436 436
437 437 parity = paritygen(web.stripecount)
438 438 diffstatsgen = diffstatgen(ctx, basectx)
439 439 diffstats = diffstat(tmpl, ctx, diffstatsgen, parity)
440 440
441 441 return dict(
442 442 diff=diff,
443 443 symrev=symrevorshortnode(req, ctx),
444 444 basenode=basectx.hex(),
445 445 changesettag=showtags,
446 446 changesetbookmark=showbookmarks,
447 447 changesetbranch=showbranch,
448 448 files=files,
449 449 diffsummary=lambda **x: diffsummary(diffstatsgen),
450 450 diffstat=diffstats,
451 451 archives=web.archivelist(ctx.hex()),
452 452 **commonentry(web.repo, ctx))
453 453
454 454 def listfilediffs(tmpl, files, node, max):
455 455 for f in files[:max]:
456 456 yield tmpl('filedifflink', node=hex(node), file=f)
457 457 if len(files) > max:
458 458 yield tmpl('fileellipses')
459 459
460 460 def diffs(web, tmpl, ctx, basectx, files, style, linerange=None,
461 461 lineidprefix=''):
462 462
463 463 def prettyprintlines(lines, blockno):
464 464 for lineno, l in enumerate(lines, 1):
465 465 difflineno = "%d.%d" % (blockno, lineno)
466 466 if l.startswith('+'):
467 467 ltype = "difflineplus"
468 468 elif l.startswith('-'):
469 469 ltype = "difflineminus"
470 470 elif l.startswith('@'):
471 471 ltype = "difflineat"
472 472 else:
473 473 ltype = "diffline"
474 474 yield tmpl(ltype,
475 475 line=l,
476 476 lineno=lineno,
477 477 lineid=lineidprefix + "l%s" % difflineno,
478 478 linenumber="% 8s" % difflineno)
479 479
480 480 repo = web.repo
481 481 if files:
482 482 m = match.exact(repo.root, repo.getcwd(), files)
483 483 else:
484 484 m = match.always(repo.root, repo.getcwd())
485 485
486 486 diffopts = patch.diffopts(repo.ui, untrusted=True)
487 487 node1 = basectx.node()
488 488 node2 = ctx.node()
489 489 parity = paritygen(web.stripecount)
490 490
491 491 diffhunks = patch.diffhunks(repo, node1, node2, m, opts=diffopts)
492 492 for blockno, (fctx1, fctx2, header, hunks) in enumerate(diffhunks, 1):
493 493 if style != 'raw':
494 494 header = header[1:]
495 495 lines = [h + '\n' for h in header]
496 496 for hunkrange, hunklines in hunks:
497 497 if linerange is not None and hunkrange is not None:
498 498 s1, l1, s2, l2 = hunkrange
499 499 if not mdiff.hunkinrange((s2, l2), linerange):
500 500 continue
501 501 lines.extend(hunklines)
502 502 if lines:
503 503 yield tmpl('diffblock', parity=next(parity), blockno=blockno,
504 504 lines=prettyprintlines(lines, blockno))
505 505
506 506 def compare(tmpl, context, leftlines, rightlines):
507 507 '''Generator function that provides side-by-side comparison data.'''
508 508
509 509 def compline(type, leftlineno, leftline, rightlineno, rightline):
510 510 lineid = leftlineno and ("l%s" % leftlineno) or ''
511 511 lineid += rightlineno and ("r%s" % rightlineno) or ''
512 512 return tmpl('comparisonline',
513 513 type=type,
514 514 lineid=lineid,
515 515 leftlineno=leftlineno,
516 516 leftlinenumber="% 6s" % (leftlineno or ''),
517 517 leftline=leftline or '',
518 518 rightlineno=rightlineno,
519 519 rightlinenumber="% 6s" % (rightlineno or ''),
520 520 rightline=rightline or '')
521 521
522 522 def getblock(opcodes):
523 523 for type, llo, lhi, rlo, rhi in opcodes:
524 524 len1 = lhi - llo
525 525 len2 = rhi - rlo
526 526 count = min(len1, len2)
527 527 for i in xrange(count):
528 528 yield compline(type=type,
529 529 leftlineno=llo + i + 1,
530 530 leftline=leftlines[llo + i],
531 531 rightlineno=rlo + i + 1,
532 532 rightline=rightlines[rlo + i])
533 533 if len1 > len2:
534 534 for i in xrange(llo + count, lhi):
535 535 yield compline(type=type,
536 536 leftlineno=i + 1,
537 537 leftline=leftlines[i],
538 538 rightlineno=None,
539 539 rightline=None)
540 540 elif len2 > len1:
541 541 for i in xrange(rlo + count, rhi):
542 542 yield compline(type=type,
543 543 leftlineno=None,
544 544 leftline=None,
545 545 rightlineno=i + 1,
546 546 rightline=rightlines[i])
547 547
548 548 s = difflib.SequenceMatcher(None, leftlines, rightlines)
549 549 if context < 0:
550 550 yield tmpl('comparisonblock', lines=getblock(s.get_opcodes()))
551 551 else:
552 552 for oc in s.get_grouped_opcodes(n=context):
553 553 yield tmpl('comparisonblock', lines=getblock(oc))
554 554
555 555 def diffstatgen(ctx, basectx):
556 556 '''Generator function that provides the diffstat data.'''
557 557
558 558 stats = patch.diffstatdata(
559 559 util.iterlines(ctx.diff(basectx, noprefix=False)))
560 560 maxname, maxtotal, addtotal, removetotal, binary = patch.diffstatsum(stats)
561 561 while True:
562 562 yield stats, maxname, maxtotal, addtotal, removetotal, binary
563 563
564 564 def diffsummary(statgen):
565 565 '''Return a short summary of the diff.'''
566 566
567 567 stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen)
568 568 return _(' %d files changed, %d insertions(+), %d deletions(-)\n') % (
569 569 len(stats), addtotal, removetotal)
570 570
571 571 def diffstat(tmpl, ctx, statgen, parity):
572 572 '''Return a diffstat template for each file in the diff.'''
573 573
574 574 stats, maxname, maxtotal, addtotal, removetotal, binary = next(statgen)
575 575 files = ctx.files()
576 576
577 577 def pct(i):
578 578 if maxtotal == 0:
579 579 return 0
580 580 return (float(i) / maxtotal) * 100
581 581
582 582 fileno = 0
583 583 for filename, adds, removes, isbinary in stats:
584 584 template = 'diffstatlink' if filename in files else 'diffstatnolink'
585 585 total = adds + removes
586 586 fileno += 1
587 587 yield tmpl(template, node=ctx.hex(), file=filename, fileno=fileno,
588 588 total=total, addpct=pct(adds), removepct=pct(removes),
589 589 parity=next(parity))
590 590
591 591 class sessionvars(object):
592 592 def __init__(self, vars, start='?'):
593 593 self.start = start
594 594 self.vars = vars
595 595 def __getitem__(self, key):
596 596 return self.vars[key]
597 597 def __setitem__(self, key, value):
598 598 self.vars[key] = value
599 599 def __copy__(self):
600 600 return sessionvars(copy.copy(self.vars), self.start)
601 601 def __iter__(self):
602 602 separator = self.start
603 603 for key, value in sorted(self.vars.iteritems()):
604 604 yield {'name': key,
605 605 'value': pycompat.bytestr(value),
606 606 'separator': separator,
607 607 }
608 608 separator = '&'
609 609
610 610 class wsgiui(uimod.ui):
611 611 # default termwidth breaks under mod_wsgi
612 612 def termwidth(self):
613 613 return 80
614 614
615 615 def getwebsubs(repo):
616 616 websubtable = []
617 617 websubdefs = repo.ui.configitems('websub')
618 618 # we must maintain interhg backwards compatibility
619 619 websubdefs += repo.ui.configitems('interhg')
620 620 for key, pattern in websubdefs:
621 621 # grab the delimiter from the character after the "s"
622 unesc = pattern[1]
622 unesc = pattern[1:2]
623 623 delim = re.escape(unesc)
624 624
625 625 # identify portions of the pattern, taking care to avoid escaped
626 626 # delimiters. the replace format and flags are optional, but
627 627 # delimiters are required.
628 628 match = re.match(
629 629 r'^s%s(.+)(?:(?<=\\\\)|(?<!\\))%s(.*)%s([ilmsux])*$'
630 630 % (delim, delim, delim), pattern)
631 631 if not match:
632 632 repo.ui.warn(_("websub: invalid pattern for %s: %s\n")
633 633 % (key, pattern))
634 634 continue
635 635
636 636 # we need to unescape the delimiter for regexp and format
637 637 delim_re = re.compile(r'(?<!\\)\\%s' % delim)
638 638 regexp = delim_re.sub(unesc, match.group(1))
639 639 format = delim_re.sub(unesc, match.group(2))
640 640
641 641 # the pattern allows for 6 regexp flags, so set them if necessary
642 642 flagin = match.group(3)
643 643 flags = 0
644 644 if flagin:
645 645 for flag in flagin.upper():
646 646 flags |= re.__dict__[flag]
647 647
648 648 try:
649 649 regexp = re.compile(regexp, flags)
650 650 websubtable.append((regexp, format))
651 651 except re.error:
652 652 repo.ui.warn(_("websub: invalid regexp for %s: %s\n")
653 653 % (key, regexp))
654 654 return websubtable
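
In getwebsubs() above, the delimiter is the character immediately after the leading 's' of a websub pattern such as s|foo|bar|i. With pattern[1] it would arrive as an integer on Python 3 and the subsequent re.escape() call and regexp construction would break; pattern[1:2] keeps it a one-byte string on both versions. A rough sketch using a hypothetical pattern value:

    import re

    # Hypothetical websub pattern, only to show the delimiter extraction.
    pattern = b's|foo|bar|i'

    unesc = pattern[1:2]        # b'|' on Python 3 (pattern[1] would give 124)
    delim = re.escape(unesc)    # b'\\|', safe to embed in later regexps
    # roughly what the function then builds to unescape the delimiter:
    delim_re = re.compile(br'(?<!\\)\\' + delim)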