##// END OF EJS Templates
annotate: correct parameter name of decorate() function
Yuya Nishihara -
r36953:ec46b0ee default
parent child Browse files
Show More
@@ -1,703 +1,703 b''
1 # dagop.py - graph ancestry and topology algorithm for revset
1 # dagop.py - graph ancestry and topology algorithm for revset
2 #
2 #
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import heapq
10 import heapq
11
11
12 from .thirdparty import (
12 from .thirdparty import (
13 attr,
13 attr,
14 )
14 )
15 from . import (
15 from . import (
16 error,
16 error,
17 mdiff,
17 mdiff,
18 node,
18 node,
19 patch,
19 patch,
20 pycompat,
20 pycompat,
21 smartset,
21 smartset,
22 )
22 )
23
23
# Re-exported smartset constructors; used throughout this module to build
# the return values of the public functions below.
baseset = smartset.baseset
generatorset = smartset.generatorset

# possible maximum depth between null and wdir()
_maxlogdepth = 0x80000000
29
29
def _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse):
    """Walk DAG using 'pfunc' from the given 'revs' nodes

    'pfunc(rev)' should return the parent/child revisions of the given 'rev'
    if 'reverse' is True/False respectively.

    Scan ends at the stopdepth (exclusive) if specified. Revisions found
    earlier than the startdepth are omitted.
    """
    # normalize the depth window to [startdepth, stopdepth)
    if startdepth is None:
        startdepth = 0
    if stopdepth is None:
        stopdepth = _maxlogdepth
    if stopdepth == 0:
        return
    if stopdepth < 0:
        raise error.ProgrammingError('negative stopdepth')
    if reverse:
        heapsign = -1 # max heap
    else:
        heapsign = +1 # min heap

    # load input revs lazily to heap so earlier revisions can be yielded
    # without fully computing the input revs
    revs.sort(reverse)
    irevs = iter(revs)
    pendingheap = [] # [(heapsign * rev, depth), ...] (i.e. lower depth first)

    inputrev = next(irevs, None)
    if inputrev is not None:
        heapq.heappush(pendingheap, (heapsign * inputrev, 0))

    lastrev = None
    while pendingheap:
        currev, curdepth = heapq.heappop(pendingheap)
        currev = heapsign * currev
        # keep only one input rev on the heap at a time; pull the next input
        # rev in when the current one is consumed
        if currev == inputrev:
            inputrev = next(irevs, None)
            if inputrev is not None:
                heapq.heappush(pendingheap, (heapsign * inputrev, 0))
        # rescan parents until curdepth >= startdepth because queued entries
        # of the same revision are iterated from the lowest depth
        foundnew = (currev != lastrev)
        if foundnew and curdepth >= startdepth:
            lastrev = currev
            yield currev
        pdepth = curdepth + 1
        if foundnew and pdepth < stopdepth:
            for prev in pfunc(currev):
                if prev != node.nullrev:
                    heapq.heappush(pendingheap, (heapsign * prev, pdepth))
81
81
def filectxancestors(fctxs, followfirst=False):
    """Like filectx.ancestors(), but can walk from multiple files/revisions,
    and includes the given fctxs themselves

    Yields (rev, {fctx, ...}) pairs in descending order.
    """
    pending = {}  # rev -> set of fctxs attached to that rev
    revheap = []  # negated revs, so heappop() returns the highest rev

    def schedule(fctx):
        # queue 'fctx' for processing, grouping it with any other fctxs
        # already pending at the same revision
        rev = fctx.rev()
        if rev not in pending:
            pending[rev] = set()
            heapq.heappush(revheap, -rev) # max heap
        pending[rev].add(fctx)

    cut = 1 if followfirst else None

    for fctx in fctxs:
        schedule(fctx)
    while pending:
        toprev = -heapq.heappop(revheap)
        topfctxs = pending.pop(toprev)
        yield toprev, topfctxs
        for fctx in topfctxs:
            for parent in fctx.parents()[:cut]:
                schedule(parent)
    assert not revheap
112
112
def filerevancestors(fctxs, followfirst=False):
    """Like filectx.ancestors(), but can walk from multiple files/revisions,
    and includes the given fctxs themselves

    Returns a smartset.
    """
    # keep only the revision numbers; the fctx sets are not needed here
    revgen = (rev for rev, _fctxs in filectxancestors(fctxs, followfirst))
    return generatorset(revgen, iterasc=False)
121
121
def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth, cutfunc):
    # Generator backing revancestors(); see that function's docstring for
    # the meaning of the arguments.
    if followfirst:
        cut = 1
    else:
        cut = None
    cl = repo.changelog
    def plainpfunc(rev):
        try:
            return cl.parentrevs(rev)[:cut]
        except error.WdirUnsupported:
            # wdir() is not in the changelog; read its parents from the
            # in-memory context instead
            return (pctx.rev() for pctx in repo[rev].parents()[:cut])
    if cutfunc is None:
        pfunc = plainpfunc
    else:
        # drop cut revisions both when expanding parents and from the
        # input revs themselves
        pfunc = lambda rev: [r for r in plainpfunc(rev) if not cutfunc(r)]
        revs = revs.filter(lambda rev: not cutfunc(rev))
    return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=True)
139
139
def revancestors(repo, revs, followfirst=False, startdepth=None,
                 stopdepth=None, cutfunc=None):
    """Like revlog.ancestors(), but supports additional options, includes
    the given revs themselves, and returns a smartset

    Scan ends at the stopdepth (exclusive) if specified. Revisions found
    earlier than the startdepth are omitted.

    If cutfunc is provided, it will be used to cut the traversal of the DAG.
    When cutfunc(X) returns True, the DAG traversal stops - revision X and
    X's ancestors in the traversal path will be skipped. This could be an
    optimization sometimes.

    Note: if Y is an ancestor of X, cutfunc(X) returning True does not
    necessarily mean Y will also be cut. Usually cutfunc(Y) also wants to
    return True in this case. For example,

    D # revancestors(repo, D, cutfunc=lambda rev: rev == B)
    |\ # will include "A", because the path D -> C -> A was not cut.
    B C # If "B" gets cut, "A" might want to be cut too.
    |/
    A
    """
    gen = _genrevancestors(repo, revs, followfirst, startdepth, stopdepth,
                           cutfunc)
    # ancestors are walked from the highest rev downwards
    return generatorset(gen, iterasc=False)
166
166
def _genrevdescendants(repo, revs, followfirst):
    # Generator backing the un-depth-limited case of revdescendants();
    # yields revs and their descendants in ascending order.
    if followfirst:
        cut = 1
    else:
        cut = None

    cl = repo.changelog
    first = revs.min()
    nullrev = node.nullrev
    if first == nullrev:
        # Are there nodes with a null first parent and a non-null
        # second one? Maybe. Do we care? Probably not.
        # null is an ancestor of every rev, so the whole changelog qualifies.
        yield first
        for i in cl:
            yield i
    else:
        seen = set(revs)
        for i in cl.revs(first):
            if i in seen:
                yield i
                continue
            # 'i' is a descendant iff one of its (followed) parents already
            # is; scanning in ascending order guarantees parents are decided
            # before their children
            for x in cl.parentrevs(i)[:cut]:
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break
193
193
def _builddescendantsmap(repo, startrev, followfirst):
    """Build map of 'rev -> child revs', offset from startrev"""
    cl = repo.changelog
    nullrev = node.nullrev
    # one (initially empty) child list per rev in [startrev, len(cl))
    descmap = [[] for _rev in xrange(startrev, len(cl))]
    for currev in cl.revs(startrev + 1):
        p1rev, p2rev = cl.parentrevs(currev)
        if p1rev >= startrev:
            descmap[p1rev - startrev].append(currev)
        # second parents are ignored when only first parents are followed
        if not followfirst and p2rev != nullrev and p2rev >= startrev:
            descmap[p2rev - startrev].append(currev)
    return descmap
206
206
def _genrevdescendantsofdepth(repo, revs, followfirst, startdepth, stopdepth):
    # Depth-limited variant of _genrevdescendants(): walk forwards through a
    # precomputed child map using the generic depth-aware tree walker.
    startrev = revs.min()
    descmap = _builddescendantsmap(repo, startrev, followfirst)
    def pfunc(rev):
        return descmap[rev - startrev]
    return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=False)
213
213
def revdescendants(repo, revs, followfirst, startdepth=None, stopdepth=None):
    """Like revlog.descendants() but supports additional options, includes
    the given revs themselves, and returns a smartset

    Scan ends at the stopdepth (exclusive) if specified. Revisions found
    earlier than the startdepth are omitted.
    """
    if startdepth is None and stopdepth is None:
        # fast path: no depth bookkeeping needed
        gen = _genrevdescendants(repo, revs, followfirst)
    else:
        gen = _genrevdescendantsofdepth(repo, revs, followfirst,
                                        startdepth, stopdepth)
    return generatorset(gen, iterasc=True)
227
227
228 def _reachablerootspure(repo, minroot, roots, heads, includepath):
228 def _reachablerootspure(repo, minroot, roots, heads, includepath):
229 """return (heads(::<roots> and ::<heads>))
229 """return (heads(::<roots> and ::<heads>))
230
230
231 If includepath is True, return (<roots>::<heads>)."""
231 If includepath is True, return (<roots>::<heads>)."""
232 if not roots:
232 if not roots:
233 return []
233 return []
234 parentrevs = repo.changelog.parentrevs
234 parentrevs = repo.changelog.parentrevs
235 roots = set(roots)
235 roots = set(roots)
236 visit = list(heads)
236 visit = list(heads)
237 reachable = set()
237 reachable = set()
238 seen = {}
238 seen = {}
239 # prefetch all the things! (because python is slow)
239 # prefetch all the things! (because python is slow)
240 reached = reachable.add
240 reached = reachable.add
241 dovisit = visit.append
241 dovisit = visit.append
242 nextvisit = visit.pop
242 nextvisit = visit.pop
243 # open-code the post-order traversal due to the tiny size of
243 # open-code the post-order traversal due to the tiny size of
244 # sys.getrecursionlimit()
244 # sys.getrecursionlimit()
245 while visit:
245 while visit:
246 rev = nextvisit()
246 rev = nextvisit()
247 if rev in roots:
247 if rev in roots:
248 reached(rev)
248 reached(rev)
249 if not includepath:
249 if not includepath:
250 continue
250 continue
251 parents = parentrevs(rev)
251 parents = parentrevs(rev)
252 seen[rev] = parents
252 seen[rev] = parents
253 for parent in parents:
253 for parent in parents:
254 if parent >= minroot and parent not in seen:
254 if parent >= minroot and parent not in seen:
255 dovisit(parent)
255 dovisit(parent)
256 if not reachable:
256 if not reachable:
257 return baseset()
257 return baseset()
258 if not includepath:
258 if not includepath:
259 return reachable
259 return reachable
260 for rev in sorted(seen):
260 for rev in sorted(seen):
261 for parent in seen[rev]:
261 for parent in seen[rev]:
262 if parent in reachable:
262 if parent in reachable:
263 reached(rev)
263 reached(rev)
264 return reachable
264 return reachable
265
265
def reachableroots(repo, roots, heads, includepath=False):
    """return (heads(::<roots> and ::<heads>))

    If includepath is True, return (<roots>::<heads>)."""
    if not roots:
        return baseset()
    minroot = roots.min()
    roots = list(roots)
    heads = list(heads)
    try:
        # use the changelog's native implementation when it provides one
        revs = repo.changelog.reachableroots(minroot, heads, roots, includepath)
    except AttributeError:
        # fall back to the pure-Python traversal
        revs = _reachablerootspure(repo, minroot, roots, heads, includepath)
    revs = baseset(revs)
    revs.sort()
    return revs
282
282
def _changesrange(fctx1, fctx2, linerange2, diffopts):
    """Return `(diffinrange, linerange1)` where `diffinrange` is True
    if diff from fctx2 to fctx1 has changes in linerange2 and
    `linerange1` is the new line range for fctx1.
    """
    allblocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
    inrangeblocks, linerange1 = mdiff.blocksinrange(allblocks, linerange2)
    # '!' marks a changed block
    hasdiff = any(blocktype == '!' for _, blocktype in inrangeblocks)
    return hasdiff, linerange1
292
292
def blockancestors(fctx, fromline, toline, followfirst=False):
    """Yield ancestors of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.

    Yields (fctx, (fromline, toline)) pairs, starting from the revision
    that introduced `fctx` and walking towards older revisions.
    """
    diffopts = patch.diffopts(fctx._repo.ui)
    fctx = fctx.introfilectx()
    visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
    while visit:
        # always process the pending entry with the highest
        # (linkrev, filenode) key first
        c, linerange2 = visit.pop(max(visit))
        pl = c.parents()
        if followfirst:
            pl = pl[:1]
        if not pl:
            # The block originates from the initial revision.
            yield c, linerange2
            continue
        inrange = False
        for p in pl:
            inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
            inrange = inrange or inrangep
            if linerange1[0] == linerange1[1]:
                # Parent's linerange is empty, meaning that the block got
                # introduced in this revision; no need to go further in this
                # branch.
                continue
            # Set _descendantrev with 'c' (a known descendant) so that, when
            # _adjustlinkrev is called for 'p', it receives this descendant
            # (as srcrev) instead possibly topmost introrev.
            p._descendantrev = c.rev()
            visit[p.linkrev(), p.filenode()] = p, linerange1
        if inrange:
            yield c, linerange2
325
325
def blockdescendants(fctx, fromline, toline):
    """Yield descendants of `fctx` with respect to the block of lines within
    `fromline`-`toline` range.

    Yields (fctx, (fromline, toline)) pairs in ascending filelog-rev order.
    """
    # First possibly yield 'fctx' if it has changes in range with respect to
    # its parents.
    try:
        c, linerange1 = next(blockancestors(fctx, fromline, toline))
    except StopIteration:
        pass
    else:
        if c == fctx:
            yield c, linerange1

    diffopts = patch.diffopts(fctx._repo.ui)
    fl = fctx.filelog()
    # seen: filelog rev -> (fctx, linerange) for revs whose range is known
    seen = {fctx.filerev(): (fctx, (fromline, toline))}
    for i in fl.descendants([fctx.filerev()]):
        c = fctx.filectx(i)
        inrange = False
        for x in fl.parentrevs(i):
            try:
                p, linerange2 = seen[x]
            except KeyError:
                # nullrev or other branch
                continue
            inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
            inrange = inrange or inrangep
            # If revision 'i' has been seen (it's a merge) and the line range
            # previously computed differs from the one we just got, we take the
            # surrounding interval. This is conservative but avoids losing
            # information.
            if i in seen and seen[i][1] != linerange1:
                lbs, ubs = zip(linerange1, seen[i][1])
                linerange1 = min(lbs), max(ubs)
            seen[i] = c, linerange1
        if inrange:
            yield c, linerange1
364
364
@attr.s(slots=True, frozen=True)
class annotateline(object):
    """Annotation data for a single line: the filectx the line is
    attributed to, and optionally its 1-based line number."""
    # filectx this line is attributed to (see annotate() below)
    fctx = attr.ib()
    # 1-based line number when line numbers are requested.
    # NOTE(review): the default of False (rather than None) for a line
    # number looks odd -- confirm callers rely on this sentinel.
    lineno = attr.ib(default=False)
    # Whether this annotation was the result of a skip-annotate.
    skip = attr.ib(default=False)
371
371
372 def _countlines(text):
372 def _countlines(text):
373 if text.endswith("\n"):
373 if text.endswith("\n"):
374 return text.count("\n")
374 return text.count("\n")
375 return text.count("\n") + int(bool(text))
375 return text.count("\n") + int(bool(text))
376
376
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    See test-annotate.py for unit tests.
    '''
    # each entry of 'parents'/'child' is a (lines, text) pair as produced by
    # decorate() in annotate() below
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        # only replace lines still blamed on the child
                        if child[0][bk].fctx == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = attr.evolve(parent[0][ak], skip=True)
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk].fctx == childfctx:
                        # 'min(..., a2 - 1)' repeats the parent's last line
                        # when the child hunk is longer than the parent hunk
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = attr.evolve(parent[0][ak], skip=True)
    return child
437
437
def annotate(base, parents, linenumber=False, skiprevs=None, diffopts=None):
    """Core algorithm for filectx.annotate()

    `parents(fctx)` is a function returning a list of parent filectxs.

    Returns a list of (annotateline, linecontent) pairs for `base`.
    """

    if linenumber:
        def decorate(text, fctx):
            # one annotateline per line, initially attributing every line
            # to 'fctx' itself; _annotatepair() reassigns matched lines
            return ([annotateline(fctx=fctx, lineno=i)
                     for i in xrange(1, _countlines(text) + 1)], text)
    else:
        def decorate(text, fctx):
            return ([annotateline(fctx=fctx)] * _countlines(text), text)

    # This algorithm would prefer to be recursive, but Python is a
    # bit recursion-hostile. Instead we do an iterative
    # depth-first search.

    # 1st DFS pre-calculates pcache and needed
    visit = [base]
    pcache = {}  # fctx -> list of parent fctxs
    needed = {base: 1}  # fctx -> number of children still needing its result
    while visit:
        f = visit.pop()
        if f in pcache:
            continue
        pl = parents(f)
        pcache[f] = pl
        for p in pl:
            needed[p] = needed.get(p, 0) + 1
            if p not in pcache:
                visit.append(p)

    # 2nd DFS does the actual annotate
    visit[:] = [base]
    hist = {}  # fctx -> (lines, text) annotation result
    while visit:
        f = visit[-1]
        if f in hist:
            visit.pop()
            continue

        # a node is annotated only once all of its parents have been
        ready = True
        pl = pcache[f]
        for p in pl:
            if p not in hist:
                ready = False
                visit.append(p)
        if ready:
            visit.pop()
            curr = decorate(f.data(), f)
            skipchild = False
            if skiprevs is not None:
                skipchild = f._changeid in skiprevs
            curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
                                 diffopts)
            for p in pl:
                # free a parent's result as soon as its last child has
                # consumed it, to bound memory usage
                if needed[p] == 1:
                    del hist[p]
                    del needed[p]
                else:
                    needed[p] -= 1

            hist[f] = curr
            del pcache[f]

    lineattrs, text = hist[base]
    return pycompat.ziplist(lineattrs, mdiff.splitnewlines(text))
506
506
def toposort(revs, parentsfunc, firstbranch=()):
    """Yield revisions from heads to roots one (topo) branch at a time.

    This function aims to be used by a graph generator that wishes to minimize
    the number of parallel branches and their interleaving.

    Example iteration order (numbers show the "true" order in a changelog):

      o  4
      |
      o  1
      |
      | o  3
      | |
      | o  2
      |/
      o  0

    Note that the ancestors of merges are understood by the current
    algorithm to be on the same branch. This means no reordering will
    occur behind a merge.
    """

    ### Quick summary of the algorithm
    #
    # This function is based around a "retention" principle. We keep revisions
    # in memory until we are ready to emit a whole branch that immediately
    # "merges" into an existing one. This reduces the number of parallel
    # branches with interleaved revisions.
    #
    # During iteration revs are split into two groups:
    # A) revision already emitted
    # B) revision in "retention". They are stored as different subgroups.
    #
    # for each REV, we do the following logic:
    #
    #   1) if REV is a parent of (A), we will emit it. If there is a
    #   retention group ((B) above) that is blocked on REV being
    #   available, we emit all the revisions out of that retention
    #   group first.
    #
    #   2) else, we'll search for a subgroup in (B) awaiting for REV to be
    #   available, if such subgroup exist, we add REV to it and the subgroup is
    #   now awaiting for REV.parents() to be available.
    #
    #   3) finally if no such group existed in (B), we create a new subgroup.
    #
    #
    # To bootstrap the algorithm, we emit the tipmost revision (which
    # puts it in group (A) from above).

    revs.sort(reverse=True)

    # Set of parents of revision that have been emitted. They can be considered
    # unblocked as the graph generator is already aware of them so there is no
    # need to delay the revisions that reference them.
    #
    # If someone wants to prioritize a branch over the others, pre-filling this
    # set will force all other branches to wait until this branch is ready to
    # be emitted.
    unblocked = set(firstbranch)

    # list of groups waiting to be displayed, each group is defined by:
    #
    #   (revs:    lists of revs waiting to be displayed,
    #    blocked: set of that cannot be displayed before those in 'revs')
    #
    # The second value ('blocked') correspond to parents of any revision in
    # the group ('revs') that is not itself contained in the group. The main
    # idea of this algorithm is to delay as much as possible the emission of
    # any revision.  This means waiting for the moment we are about to display
    # these parents to display the revs in a group.
    #
    # This first implementation is smart until it encounters a merge: it will
    # emit revs as soon as any parent is about to be emitted and can grow an
    # arbitrary number of revs in 'blocked'. In practice this mean we properly
    # retains new branches but gives up on any special ordering for ancestors
    # of merges. The implementation can be improved to handle this better.
    #
    # The first subgroup is special. It corresponds to all the revision that
    # were already emitted. The 'revs' lists is expected to be empty and the
    # 'blocked' set contains the parents revisions of already emitted revision.
    #
    # You could pre-seed the <parents> set of groups[0] to a specific
    # changesets to select what the first emitted branch should be.
    groups = [([], unblocked)]
    pendingheap = []
    pendingset = set()

    heapq.heapify(pendingheap)
    heappop = heapq.heappop
    heappush = heapq.heappush
    for currentrev in revs:
        # Heap works with smallest element, we want highest so we invert
        if currentrev not in pendingset:
            heappush(pendingheap, -currentrev)
            pendingset.add(currentrev)
        # iterates on pending rev until after the current rev have been
        # processed.
        rev = None
        while rev != currentrev:
            rev = -heappop(pendingheap)
            pendingset.remove(rev)

            # Seek for a subgroup blocked, waiting for the current revision.
            matching = [i for i, g in enumerate(groups) if rev in g[1]]

            if matching:
                # The main idea is to gather together all sets that are blocked
                # on the same revision.
                #
                # Groups are merged when a common blocking ancestor is
                # observed. For example, given two groups:
                #
                # revs [5, 4] waiting for 1
                # revs [3, 2] waiting for 1
                #
                # These two groups will be merged when we process
                # 1. In theory, we could have merged the groups when
                # we added 2 to the group it is now in (we could have
                # noticed the groups were both blocked on 1 then), but
                # the way it works now makes the algorithm simpler.
                #
                # We also always keep the oldest subgroup first. We can
                # probably improve the behavior by having the longest set
                # first. That way, graph algorithms could minimise the length
                # of parallel lines their drawing. This is currently not done.
                targetidx = matching.pop(0)
                trevs, tparents = groups[targetidx]
                for i in matching:
                    gr = groups[i]
                    trevs.extend(gr[0])
                    tparents |= gr[1]
                # delete all merged subgroups (except the one we kept)
                # (starting from the last subgroup for performance and
                # sanity reasons)
                for i in reversed(matching):
                    del groups[i]
            else:
                # This is a new head. We create a new subgroup for it.
                targetidx = len(groups)
                groups.append(([], {rev}))

            gr = groups[targetidx]

            # We now add the current nodes to this subgroups. This is done
            # after the subgroup merging because all elements from a subgroup
            # that relied on this rev must precede it.
            #
            # we also update the <parents> set to include the parents of the
            # new nodes.
            if rev == currentrev: # only display stuff in rev
                gr[0].append(rev)
            gr[1].remove(rev)
            parents = [p for p in parentsfunc(rev) if p > node.nullrev]
            gr[1].update(parents)
            for p in parents:
                if p not in pendingset:
                    pendingset.add(p)
                    heappush(pendingheap, -p)

            # Look for a subgroup to display
            #
            # When unblocked is empty (if clause), we were not waiting for any
            # revisions during the first iteration (if no priority was given)
            # or if we emitted a whole disconnected set of the graph (reached
            # a root). In that case we arbitrarily take the oldest known
            # subgroup. The heuristic could probably be better.
            #
            # Otherwise (elif clause) if the subgroup is blocked on
            # a revision we just emitted, we can safely emit it as
            # well.
            if not unblocked:
                if len(groups) > 1: # display other subset
                    targetidx = 1
                    gr = groups[1]
            elif not gr[1] & unblocked:
                gr = None

            if gr is not None:
                # update the set of awaited revisions with the one from the
                # subgroup
                unblocked |= gr[1]
                # output all revisions in the subgroup
                for r in gr[0]:
                    yield r
                # delete the subgroup that you just output
                # unless it is groups[0] in which case you just empty it.
                if targetidx:
                    del groups[targetidx]
                else:
                    gr[0][:] = []
    # Check if we have some subgroup waiting for revisions we are not going to
    # iterate over
    for g in groups:
        for r in g[0]:
            yield r
@@ -1,104 +1,104 b''
1 from __future__ import absolute_import
1 from __future__ import absolute_import
2 from __future__ import print_function
2 from __future__ import print_function
3
3
4 import unittest
4 import unittest
5
5
6 from mercurial import (
6 from mercurial import (
7 mdiff,
7 mdiff,
8 )
8 )
9 from mercurial.dagop import (
9 from mercurial.dagop import (
10 annotateline,
10 annotateline,
11 _annotatepair,
11 _annotatepair,
12 )
12 )
13
13
class AnnotateTests(unittest.TestCase):
    """Unit tests for annotate code."""

    def testannotatepair(self):
        self.maxDiff = None # camelcase-required

        oldfctx = b'old'
        p1fctx, p2fctx, childfctx = b'p1', b'p2', b'c'
        olddata = b'a\nb\n'
        p1data = b'a\nb\nc\n'
        p2data = b'a\nc\nd\n'
        childdata = b'a\nb2\nc\nc2\nd\n'
        diffopts = mdiff.diffopts()

        # Local stand-in for dagop's decorate(); the parameter is named
        # 'fctx' to match the real function's signature.
        def decorate(text, fctx):
            return ([annotateline(fctx=fctx, lineno=i)
                     for i in range(1, text.count(b'\n') + 1)],
                    text)

        # Basic usage

        oldann = decorate(olddata, oldfctx)
        p1ann = decorate(p1data, p1fctx)
        p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
        self.assertEqual(p1ann[0], [
            annotateline(b'old', 1),
            annotateline(b'old', 2),
            annotateline(b'p1', 3),
        ])

        p2ann = decorate(p2data, p2fctx)
        p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
        self.assertEqual(p2ann[0], [
            annotateline(b'old', 1),
            annotateline(b'p2', 2),
            annotateline(b'p2', 3),
        ])

        # Test with multiple parents (note the difference caused by ordering)

        childann = decorate(childdata, childfctx)
        childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
                                 diffopts)
        self.assertEqual(childann[0], [
            annotateline(b'old', 1),
            annotateline(b'c', 2),
            annotateline(b'p2', 2),
            annotateline(b'c', 4),
            annotateline(b'p2', 3),
        ])

        childann = decorate(childdata, childfctx)
        childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
                                 diffopts)
        self.assertEqual(childann[0], [
            annotateline(b'old', 1),
            annotateline(b'c', 2),
            annotateline(b'p1', 3),
            annotateline(b'c', 4),
            annotateline(b'p2', 3),
        ])

        # Test with skipchild (note the difference caused by ordering)

        childann = decorate(childdata, childfctx)
        childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
                                 diffopts)
        self.assertEqual(childann[0], [
            annotateline(b'old', 1),
            annotateline(b'old', 2, True),
            # note that this line was carried over from earlier so it is *not*
            # marked skipped
            annotateline(b'p2', 2),
            annotateline(b'p2', 2, True),
            annotateline(b'p2', 3),
        ])

        childann = decorate(childdata, childfctx)
        childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
                                 diffopts)
        self.assertEqual(childann[0], [
            annotateline(b'old', 1),
            annotateline(b'old', 2, True),
            annotateline(b'p1', 3),
            annotateline(b'p1', 3, True),
            annotateline(b'p2', 3),
        ])
101
101
if __name__ == '__main__':
    # silenttestrunner is Mercurial's quiet unittest driver; imported lazily
    # so merely importing this module has no side effects.
    import silenttestrunner
    silenttestrunner.main(__name__)
General Comments 0
You need to be logged in to leave comments. Login now