@@ -1,1108 +1,1113 @@
|
1 | 1 | # dagop.py - graph ancestry and topology algorithm for revset |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2010 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import heapq |
|
11 | 11 | |
|
12 | 12 | from .node import nullrev |
|
13 | 13 | from .thirdparty import attr |
|
14 | 14 | from . import ( |
|
15 | 15 | error, |
|
16 | 16 | mdiff, |
|
17 | 17 | node, |
|
18 | 18 | patch, |
|
19 | 19 | pycompat, |
|
20 | 20 | smartset, |
|
21 | 21 | ) |
|
22 | 22 | |
|
23 | 23 | baseset = smartset.baseset |
|
24 | 24 | generatorset = smartset.generatorset |
|
25 | 25 | |
|
26 | 26 | # possible maximum depth between null and wdir() |
|
27 | 27 | maxlogdepth = 0x80000000 |
|
28 | 28 | |
|
29 | 29 | |
|
30 | 30 | def _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse): |
|
31 | 31 | """Walk DAG using 'pfunc' from the given 'revs' nodes |
|
32 | 32 | |
|
33 | 33 | 'pfunc(rev)' should return the parent/child revisions of the given 'rev' |
|
34 | 34 | if 'reverse' is True/False respectively. |
|
35 | 35 | |
|
36 | 36 | Scan ends at the stopdepth (exclusive) if specified. Revisions found |
|
37 | 37 | earlier than the startdepth are omitted. |
|
38 | 38 | """ |
|
39 | 39 | if startdepth is None: |
|
40 | 40 | startdepth = 0 |
|
41 | 41 | if stopdepth is None: |
|
42 | 42 | stopdepth = maxlogdepth |
|
43 | 43 | if stopdepth == 0: |
|
44 | 44 | return |
|
45 | 45 | if stopdepth < 0: |
|
46 | 46 | raise error.ProgrammingError(b'negative stopdepth') |
|
47 | 47 | if reverse: |
|
48 | 48 | heapsign = -1 # max heap |
|
49 | 49 | else: |
|
50 | 50 | heapsign = +1 # min heap |
|
51 | 51 | |
|
52 | 52 | # load input revs lazily to heap so earlier revisions can be yielded |
|
53 | 53 | # without fully computing the input revs |
|
54 | 54 | revs.sort(reverse) |
|
55 | 55 | irevs = iter(revs) |
|
56 | 56 | pendingheap = [] # [(heapsign * rev, depth), ...] (i.e. lower depth first) |
|
57 | 57 | |
|
58 | 58 | inputrev = next(irevs, None) |
|
59 | 59 | if inputrev is not None: |
|
60 | 60 | heapq.heappush(pendingheap, (heapsign * inputrev, 0)) |
|
61 | 61 | |
|
62 | 62 | lastrev = None |
|
63 | 63 | while pendingheap: |
|
64 | 64 | currev, curdepth = heapq.heappop(pendingheap) |
|
65 | 65 | currev = heapsign * currev |
|
66 | 66 | if currev == inputrev: |
|
67 | 67 | inputrev = next(irevs, None) |
|
68 | 68 | if inputrev is not None: |
|
69 | 69 | heapq.heappush(pendingheap, (heapsign * inputrev, 0)) |
|
70 | 70 | # rescan parents until curdepth >= startdepth because queued entries |
|
71 | 71 | # of the same revision are iterated from the lowest depth |
|
72 | 72 | foundnew = currev != lastrev |
|
73 | 73 | if foundnew and curdepth >= startdepth: |
|
74 | 74 | lastrev = currev |
|
75 | 75 | yield currev |
|
76 | 76 | pdepth = curdepth + 1 |
|
77 | 77 | if foundnew and pdepth < stopdepth: |
|
78 | 78 | for prev in pfunc(currev): |
|
79 | 79 | if prev != node.nullrev: |
|
80 | 80 | heapq.heappush(pendingheap, (heapsign * prev, pdepth)) |
|
81 | 81 | |
|
82 | 82 | |
|
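A minimal standalone sketch of the heap walk above (toy revision numbers and a dict-backed `pfunc` are assumptions, not Mercurial's API): entries are keyed on the negated rev so children pop before parents, duplicates of the same rev pop adjacently with the lowest depth first, and only revisions whose depth lands in `[startdepth, stopdepth)` are emitted.

```python
import heapq

def walk(pfunc, revs, startdepth=0, stopdepth=1 << 31):
    # max-heap on rev (children first); lower depth first for equal revs
    heap = [(-r, 0) for r in revs]
    heapq.heapify(heap)
    last = None
    while heap:
        negrev, depth = heapq.heappop(heap)
        rev, new = -negrev, -negrev != last
        if new and depth >= startdepth:
            last = rev
            yield rev
        if new and depth + 1 < stopdepth:
            for p in pfunc(rev):
                if p != -1:  # skip nullrev
                    heapq.heappush(heap, (-p, depth + 1))

parents = {0: [-1], 1: [0], 2: [1], 3: [2]}  # linear toy history
assert list(walk(parents.get, [3])) == [3, 2, 1, 0]
assert list(walk(parents.get, [3], startdepth=1, stopdepth=3)) == [2, 1]
```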
83 | 83 | def filectxancestors(fctxs, followfirst=False): |
|
84 | 84 | """Like filectx.ancestors(), but can walk from multiple files/revisions, |
|
85 | 85 | and includes the given fctxs themselves |
|
86 | 86 | |
|
87 | 87 | Yields (rev, {fctx, ...}) pairs in descending order. |
|
88 | 88 | """ |
|
89 | 89 | visit = {} |
|
90 | 90 | visitheap = [] |
|
91 | 91 | |
|
92 | 92 | def addvisit(fctx): |
|
93 | 93 | rev = fctx.rev() |
|
94 | 94 | if rev not in visit: |
|
95 | 95 | visit[rev] = set() |
|
96 | 96 | heapq.heappush(visitheap, -rev) # max heap |
|
97 | 97 | visit[rev].add(fctx) |
|
98 | 98 | |
|
99 | 99 | if followfirst: |
|
100 | 100 | cut = 1 |
|
101 | 101 | else: |
|
102 | 102 | cut = None |
|
103 | 103 | |
|
104 | 104 | for c in fctxs: |
|
105 | 105 | addvisit(c) |
|
106 | 106 | while visit: |
|
107 | 107 | currev = -(heapq.heappop(visitheap)) |
|
108 | 108 | curfctxs = visit.pop(currev) |
|
109 | 109 | yield currev, curfctxs |
|
110 | 110 | for c in curfctxs: |
|
111 | 111 | for parent in c.parents()[:cut]: |
|
112 | 112 | addvisit(parent) |
|
113 | 113 | assert not visitheap |
|
114 | 114 | |
|
115 | 115 | |
|
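The visit bookkeeping above, reduced to plain integers (a sketch with made-up data standing in for fctx objects): one heap entry per distinct rev, a set collecting everything that landed on that rev, popped in descending rev order.

```python
import heapq

visit, visitheap = {}, []

def addvisit(rev, item):
    if rev not in visit:
        visit[rev] = set()
        heapq.heappush(visitheap, -rev)  # max-heap via negation
    visit[rev].add(item)

for rev, item in [(3, 'x'), (5, 'y'), (3, 'z')]:
    addvisit(rev, item)

out = []
while visit:
    currev = -heapq.heappop(visitheap)
    out.append((currev, visit.pop(currev)))
assert out == [(5, {'y'}), (3, {'x', 'z'})]
```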
116 | 116 | def filerevancestors(fctxs, followfirst=False): |
|
117 | 117 | """Like filectx.ancestors(), but can walk from multiple files/revisions, |
|
118 | 118 | and includes the given fctxs themselves |
|
119 | 119 | |
|
120 | 120 | Returns a smartset. |
|
121 | 121 | """ |
|
122 | 122 | gen = (rev for rev, _cs in filectxancestors(fctxs, followfirst)) |
|
123 | 123 | return generatorset(gen, iterasc=False) |
|
124 | 124 | |
|
125 | 125 | |
|
126 | 126 | def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth, cutfunc): |
|
127 | 127 | if followfirst: |
|
128 | 128 | cut = 1 |
|
129 | 129 | else: |
|
130 | 130 | cut = None |
|
131 | 131 | cl = repo.changelog |
|
132 | 132 | |
|
133 | 133 | def plainpfunc(rev): |
|
134 | 134 | try: |
|
135 | 135 | return cl.parentrevs(rev)[:cut] |
|
136 | 136 | except error.WdirUnsupported: |
|
137 | 137 | return (pctx.rev() for pctx in repo[rev].parents()[:cut]) |
|
138 | 138 | |
|
139 | 139 | if cutfunc is None: |
|
140 | 140 | pfunc = plainpfunc |
|
141 | 141 | else: |
|
142 | 142 | pfunc = lambda rev: [r for r in plainpfunc(rev) if not cutfunc(r)] |
|
143 | 143 | revs = revs.filter(lambda rev: not cutfunc(rev)) |
|
144 | 144 | return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=True) |
|
145 | 145 | |
|
146 | 146 | |
|
147 | 147 | def revancestors( |
|
148 | 148 | repo, revs, followfirst=False, startdepth=None, stopdepth=None, cutfunc=None |
|
149 | 149 | ): |
|
150 | 150 | r"""Like revlog.ancestors(), but supports additional options, includes |
|
151 | 151 | the given revs themselves, and returns a smartset |
|
152 | 152 | |
|
153 | 153 | Scan ends at the stopdepth (exclusive) if specified. Revisions found |
|
154 | 154 | earlier than the startdepth are omitted. |
|
155 | 155 | |
|
156 | 156 | If cutfunc is provided, it will be used to cut the traversal of the DAG. |
|
157 | 157 | When cutfunc(X) returns True, the DAG traversal stops - revision X and |
|
158 | 158 | X's ancestors in the traversal path will be skipped. This could be an |
|
159 | 159 | optimization sometimes. |
|
160 | 160 | |
|
161 | 161 | Note: if Y is an ancestor of X, cutfunc(X) returning True does not |
|
162 | 162 | necessarily mean Y will also be cut. Usually cutfunc(Y) also wants to |
|
163 | 163 | return True in this case. For example, |
|
164 | 164 | |
|
165 | 165 | D # revancestors(repo, D, cutfunc=lambda rev: rev == B) |
|
166 | 166 | |\ # will include "A", because the path D -> C -> A was not cut. |
|
167 | 167 | B C # If "B" gets cut, "A" might want to be cut too. |
|
168 | 168 | |/ |
|
169 | 169 | A |
|
170 | 170 | """ |
|
171 | 171 | gen = _genrevancestors( |
|
172 | 172 | repo, revs, followfirst, startdepth, stopdepth, cutfunc |
|
173 | 173 | ) |
|
174 | 174 | return generatorset(gen, iterasc=False) |
|
175 | 175 | |
|
176 | 176 | |
|
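A tiny concrete version of the docstring's cutfunc caveat, with hypothetical numbering A=0, B=1, C=2, D=3: cutting B alone does not cut A, because the uncut path D -> C -> A still reaches it.

```python
parents = {3: [1, 2], 2: [0], 1: [0], 0: []}  # D, C, B, A

def ancestors(rev, cutfunc):
    seen, stack = set(), [rev]
    while stack:
        r = stack.pop()
        if r in seen or cutfunc(r):
            continue
        seen.add(r)
        stack.extend(parents[r])
    return seen

# B (rev 1) is cut, but A (rev 0) survives via D -> C -> A:
assert ancestors(3, cutfunc=lambda rev: rev == 1) == {3, 2, 0}
```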
177 | 177 | def _genrevdescendants(repo, revs, followfirst): |
|
178 | 178 | if followfirst: |
|
179 | 179 | cut = 1 |
|
180 | 180 | else: |
|
181 | 181 | cut = None |
|
182 | 182 | |
|
183 | 183 | cl = repo.changelog |
|
184 | 184 | first = revs.min() |
|
185 | 185 | nullrev = node.nullrev |
|
186 | 186 | if first == nullrev: |
|
187 | 187 | # Are there nodes with a null first parent and a non-null |
|
188 | 188 | # second one? Maybe. Do we care? Probably not. |
|
189 | 189 | yield first |
|
190 | 190 | for i in cl: |
|
191 | 191 | yield i |
|
192 | 192 | else: |
|
193 | 193 | seen = set(revs) |
|
194 | 194 | for i in cl.revs(first): |
|
195 | 195 | if i in seen: |
|
196 | 196 | yield i |
|
197 | 197 | continue |
|
198 | 198 | for x in cl.parentrevs(i)[:cut]: |
|
199 | 199 | if x != nullrev and x in seen: |
|
200 | 200 | seen.add(i) |
|
201 | 201 | yield i |
|
202 | 202 | break |
|
203 | 203 | |
|
204 | 204 | |
|
205 | 205 | def _builddescendantsmap(repo, startrev, followfirst): |
|
206 | 206 | """Build map of 'rev -> child revs', offset from startrev""" |
|
207 | 207 | cl = repo.changelog |
|
208 | 208 | nullrev = node.nullrev |
|
209 | 209 | descmap = [[] for _rev in pycompat.xrange(startrev, len(cl))] |
|
210 | 210 | for currev in cl.revs(startrev + 1): |
|
211 | 211 | p1rev, p2rev = cl.parentrevs(currev) |
|
212 | 212 | if p1rev >= startrev: |
|
213 | 213 | descmap[p1rev - startrev].append(currev) |
|
214 | 214 | if not followfirst and p2rev != nullrev and p2rev >= startrev: |
|
215 | 215 | descmap[p2rev - startrev].append(currev) |
|
216 | 216 | return descmap |
|
217 | 217 | |
|
218 | 218 | |
|
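What the offset map built above looks like for a toy three-revision history (assumed data, not a real changelog): children of rev `r` live at `descmap[r - startrev]`.

```python
parentrevs = {2: (1, -1), 3: (1, 2)}  # rev -> (p1, p2); rev 3 is a merge

startrev, stoprev = 1, 4
descmap = [[] for _ in range(startrev, stoprev)]
for rev in range(startrev + 1, stoprev):
    p1, p2 = parentrevs[rev]
    if p1 >= startrev:
        descmap[p1 - startrev].append(rev)
    if p2 != -1 and p2 >= startrev:  # followfirst=False case
        descmap[p2 - startrev].append(rev)

assert descmap == [[2, 3], [3], []]  # children of revs 1, 2, 3
```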
219 | 219 | def _genrevdescendantsofdepth(repo, revs, followfirst, startdepth, stopdepth): |
|
220 | 220 | startrev = revs.min() |
|
221 | 221 | descmap = _builddescendantsmap(repo, startrev, followfirst) |
|
222 | 222 | |
|
223 | 223 | def pfunc(rev): |
|
224 | 224 | return descmap[rev - startrev] |
|
225 | 225 | |
|
226 | 226 | return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=False) |
|
227 | 227 | |
|
228 | 228 | |
|
229 | 229 | def revdescendants(repo, revs, followfirst, startdepth=None, stopdepth=None): |
|
230 | 230 | """Like revlog.descendants() but supports additional options, includes |
|
231 | 231 | the given revs themselves, and returns a smartset |
|
232 | 232 | |
|
233 | 233 | Scan ends at the stopdepth (exclusive) if specified. Revisions found |
|
234 | 234 | earlier than the startdepth are omitted. |
|
235 | 235 | """ |
|
236 | 236 | if startdepth is None and (stopdepth is None or stopdepth >= maxlogdepth): |
|
237 | 237 | gen = _genrevdescendants(repo, revs, followfirst) |
|
238 | 238 | else: |
|
239 | 239 | gen = _genrevdescendantsofdepth( |
|
240 | 240 | repo, revs, followfirst, startdepth, stopdepth |
|
241 | 241 | ) |
|
242 | 242 | return generatorset(gen, iterasc=True) |
|
243 | 243 | |
|
244 | 244 | |
|
245 | 245 | def descendantrevs(revs, revsfn, parentrevsfn): |
|
246 | 246 | """Generate revision number descendants in revision order. |
|
247 | 247 | |
|
248 | 248 | Yields revision numbers starting with a child of some rev in |
|
249 | 249 | ``revs``. Results are ordered by revision number and are |
|
250 | 250 | therefore topological. Each revision is not considered a descendant |
|
251 | 251 | of itself. |
|
252 | 252 | |
|
253 | 253 | ``revsfn`` is a callable that with no argument iterates over all |
|
254 | 254 | revision numbers and with a ``start`` argument iterates over revision |
|
255 | 255 | numbers beginning with that value. |
|
256 | 256 | |
|
257 | 257 | ``parentrevsfn`` is a callable that receives a revision number and |
|
258 | 258 | returns an iterable of parent revision numbers, whose values may include |
|
259 | 259 | nullrev. |
|
260 | 260 | """ |
|
261 | 261 | first = min(revs) |
|
262 | 262 | |
|
263 | 263 | if first == nullrev: |
|
264 | 264 | for rev in revsfn(): |
|
265 | 265 | yield rev |
|
266 | 266 | return |
|
267 | 267 | |
|
268 | 268 | seen = set(revs) |
|
269 | 269 | for rev in revsfn(start=first + 1): |
|
270 | 270 | for prev in parentrevsfn(rev): |
|
271 | 271 | if prev != nullrev and prev in seen: |
|
272 | 272 | seen.add(rev) |
|
273 | 273 | yield rev |
|
274 | 274 | break |
|
275 | 275 | |
|
276 | 276 | |
|
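A sketch of the two callables' contract using a toy forked history (0 <- 1 <- 2 and 0 <- 3); the generator below condenses the same scan as `descendantrevs` purely for illustration.

```python
def revsfn(start=0):
    return iter(range(start, 4))

def parentrevsfn(rev):
    return {0: [-1], 1: [0], 2: [1], 3: [0]}[rev]

def descendants(revs, revsfn, parentrevsfn):
    seen = set(revs)
    for rev in revsfn(start=min(revs) + 1):
        if any(p != -1 and p in seen for p in parentrevsfn(rev)):
            seen.add(rev)
            yield rev

assert list(descendants({1}, revsfn, parentrevsfn)) == [2]  # not 3
```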
277 | 277 | class subsetparentswalker(object): |
|
278 | 278 | r"""Scan adjacent ancestors in the graph given by the subset |
|
279 | 279 | |
|
280 | 280 | This computes parent-child relations in the sub graph filtered by |
|
281 | 281 | a revset. The primary use case is to draw a revision graph. |
|
282 | 282 | |
|
283 | 283 | In the following example, we consider that the node 'f' has edges to all |
|
284 | 284 | ancestor nodes, but redundant paths are eliminated. The edge 'f'->'b' |
|
285 | 285 | is eliminated because there is a path 'f'->'c'->'b' for example. |
|
286 | 286 | |
|
287 | 287 | - d - e - |
|
288 | 288 | / \ |
|
289 | 289 | a - b - c - f |
|
290 | 290 | |
|
291 | 291 | If the node 'c' is filtered out, the edge 'f'->'b' is activated. |
|
292 | 292 | |
|
293 | 293 | - d - e - |
|
294 | 294 | / \ |
|
295 | 295 | a - b -(c)- f |
|
296 | 296 | |
|
297 | 297 | Likewise, if 'd' and 'e' are filtered out, this edge is fully eliminated |
|
298 | 298 | since there is a path 'f'->'c'->'b'->'a' for 'f'->'a'. |
|
299 | 299 | |
|
300 | 300 | (d) (e) |
|
301 | 301 | |
|
302 | 302 | a - b - c - f |
|
303 | 303 | |
|
304 | 304 | Implementation-wise, 'f' is passed down to 'a' as unresolved through the |
|
305 | 305 | 'f'->'e'->'d'->'a' path, whereas we do also remember that 'f' has already |
|
306 | 306 | been resolved while walking down the 'f'->'c'->'b'->'a' path. When |
|
307 | 307 | processing the node 'a', the unresolved 'f'->'a' path is eliminated as |
|
308 | 308 | the 'f' end is marked as resolved. |
|
309 | 309 | |
|
310 | 310 | Ancestors are searched from the tipmost revision in the subset so the |
|
311 | 311 | results can be cached. You should specify startrev to narrow the search |
|
312 | 312 | space to ':startrev'. |
|
313 | 313 | """ |
|
314 | 314 | |
|
315 | 315 | def __init__(self, repo, subset, startrev=None): |
|
316 | 316 | if startrev is not None: |
|
317 | 317 | subset = repo.revs(b'%d:null', startrev) & subset |
|
318 | 318 | |
|
319 | 319 | # equivalent to 'subset = subset.sorted(reverse=True)', but there's |
|
320 | 320 | # no such function. |
|
321 | 321 | fastdesc = subset.fastdesc |
|
322 | 322 | if fastdesc: |
|
323 | 323 | desciter = fastdesc() |
|
324 | 324 | else: |
|
325 | 325 | if not subset.isdescending() and not subset.istopo(): |
|
326 | 326 | subset = smartset.baseset(subset) |
|
327 | 327 | subset.sort(reverse=True) |
|
328 | 328 | desciter = iter(subset) |
|
329 | 329 | |
|
330 | 330 | self._repo = repo |
|
331 | 331 | self._changelog = repo.changelog |
|
332 | 332 | self._subset = subset |
|
333 | 333 | |
|
334 | 334 | # scanning state (see _scanparents): |
|
335 | 335 | self._tovisit = [] |
|
336 | 336 | self._pendingcnt = {} |
|
337 | 337 | self._pointers = {} |
|
338 | 338 | self._parents = {} |
|
339 | 339 | self._inputhead = nullrev # reassigned by self._advanceinput() |
|
340 | 340 | self._inputtail = desciter |
|
341 | 341 | self._bottomrev = nullrev |
|
342 | 342 | self._advanceinput() |
|
343 | 343 | |
|
344 | 344 | def parentsset(self, rev): |
|
345 | 345 | """Look up parents of the given revision in the subset, and returns |
|
346 | 346 | them as a smartset""" |
|
347 | 347 | return smartset.baseset(self.parents(rev)) |
|
348 | 348 | |
|
349 | 349 | def parents(self, rev): |
|
350 | 350 | """Look up parents of the given revision in the subset |
|
351 | 351 | |
|
352 | 352 | The returned revisions are sorted by parent index (p1/p2). |
|
353 | 353 | """ |
|
354 | 354 | self._scanparents(rev) |
|
355 | 355 | return [r for _c, r in sorted(self._parents.get(rev, []))] |
|
356 | 356 | |
|
357 | 357 | def _parentrevs(self, rev): |
|
358 | 358 | try: |
|
359 | 359 | revs = self._changelog.parentrevs(rev) |
|
360 | 360 | if revs[-1] == nullrev: |
|
361 | 361 | return revs[:-1] |
|
362 | 362 | return revs |
|
363 | 363 | except error.WdirUnsupported: |
|
364 | 364 | return tuple(pctx.rev() for pctx in self._repo[None].parents()) |
|
365 | 365 | |
|
366 | 366 | def _advanceinput(self): |
|
367 | 367 | """Advance the input iterator and set the next revision to _inputhead""" |
|
368 | 368 | if self._inputhead < nullrev: |
|
369 | 369 | return |
|
370 | 370 | try: |
|
371 | 371 | self._inputhead = next(self._inputtail) |
|
372 | 372 | except StopIteration: |
|
373 | 373 | self._bottomrev = self._inputhead |
|
374 | 374 | self._inputhead = nullrev - 1 |
|
375 | 375 | |
|
376 | 376 | def _scanparents(self, stoprev): |
|
377 | 377 | """Scan ancestors until the parents of the specified stoprev are |
|
378 | 378 | resolved""" |
|
379 | 379 | |
|
380 | 380 | # 'tovisit' is the queue of the input revisions and their ancestors. |
|
381 | 381 | # It will be populated incrementally to minimize the initial cost |
|
382 | 382 | # of computing the given subset. |
|
383 | 383 | # |
|
384 | 384 | # For to-visit revisions, we keep track of |
|
385 | 385 | # - the number of the unresolved paths: pendingcnt[rev], |
|
386 | 386 | # - dict of the unresolved descendants and chains: pointers[rev][0], |
|
387 | 387 | # - set of the already resolved descendants: pointers[rev][1]. |
|
388 | 388 | # |
|
389 | 389 | # When a revision is visited, 'pointers[rev]' should be popped and |
|
390 | 390 | # propagated to its parents accordingly. |
|
391 | 391 | # |
|
392 | 392 | # Once all pending paths have been resolved, 'pendingcnt[rev]' becomes |
|
393 | 393 | # 0 and 'parents[rev]' contains the unsorted list of parent revisions |
|
394 | # and p1/p2 chains (excluding linear paths.) | |
|
394 | # and p1/p2 chains (excluding linear paths.) The p1/p2 chains will be | |
|
395 | # used as a sort key preferring p1. 'len(chain)' should be the number | |
|
396 | # of merges between two revisions. | |
|
395 | 397 | |
|
396 | 398 | subset = self._subset |
|
397 | 399 | tovisit = self._tovisit # heap queue of [-rev] |
|
398 | 400 | pendingcnt = self._pendingcnt # {rev: count} for visited revisions |
|
399 | 401 | pointers = self._pointers # {rev: [{unresolved_rev: chain}, resolved]} |
|
400 | 402 | parents = self._parents # {rev: [(chain, rev)]} |
|
401 | 403 | |
|
402 | 404 | while tovisit or self._inputhead >= nullrev: |
|
403 | 405 | if pendingcnt.get(stoprev) == 0: |
|
404 | 406 | return |
|
405 | 407 | |
|
406 | 408 | # feed greater revisions from input set to queue |
|
407 | 409 | if not tovisit: |
|
408 | 410 | heapq.heappush(tovisit, -self._inputhead) |
|
409 | 411 | self._advanceinput() |
|
410 | 412 | while self._inputhead >= -tovisit[0]: |
|
411 | 413 | heapq.heappush(tovisit, -self._inputhead) |
|
412 | 414 | self._advanceinput() |
|
413 | 415 | |
|
414 | 416 | rev = -heapq.heappop(tovisit) |
|
415 | 417 | if rev < self._bottomrev: |
|
416 | 418 | return |
|
417 | 419 | if rev in pendingcnt and rev not in pointers: |
|
418 | 420 | continue # already visited |
|
419 | 421 | |
|
420 | 422 | curactive = rev in subset |
|
421 | 423 | pendingcnt.setdefault(rev, 0) # mark as visited |
|
422 | 424 | if curactive: |
|
423 | 425 | assert rev not in parents |
|
424 | 426 | parents[rev] = [] |
|
425 | 427 | unresolved, resolved = pointers.pop(rev, ({}, set())) |
|
426 | 428 | |
|
427 | 429 | if curactive: |
|
428 | 430 | # reached an active rev, resolve pending descendants' parents |
|
429 | 431 | for r, c in unresolved.items(): |
|
430 | 432 | pendingcnt[r] -= 1 |
|
431 | 433 | assert pendingcnt[r] >= 0 |
|
432 | 434 | if r in resolved: |
|
433 | 435 | continue # eliminate redundant path |
|
434 | 436 | parents[r].append((c, rev)) |
|
435 | 437 | # mark the descendant 'r' as resolved through this path if |
|
436 | 438 | # there are still pending pointers. the 'resolved' set may |
|
437 | 439 | # be concatenated later at a fork revision. |
|
438 | 440 | if pendingcnt[r] > 0: |
|
439 | 441 | resolved.add(r) |
|
440 | 442 | unresolved.clear() |
|
441 | 443 | # occasionally clean resolved markers. otherwise the set |
|
442 | 444 | # would grow indefinitely. |
|
443 | 445 | resolved = {r for r in resolved if pendingcnt[r] > 0} |
|
444 | 446 | |
|
445 | 447 | parentrevs = self._parentrevs(rev) |
|
446 | 448 | bothparentsactive = all(p in subset for p in parentrevs) |
|
447 | 449 | |
|
448 | 450 | # set up or propagate tracking pointers if |
|
449 | 451 | # - one of the parents is not active, |
|
450 | 452 | # - or descendants' parents are unresolved. |
|
451 | 453 | if not bothparentsactive or unresolved or resolved: |
|
452 | 454 | if len(parentrevs) <= 1: |
|
453 | 455 | # can avoid copying the tracking pointer |
|
454 | 456 | parentpointers = [(unresolved, resolved)] |
|
455 | 457 | else: |
|
456 | 458 | parentpointers = [ |
|
457 | 459 | (unresolved, resolved), |
|
458 | 460 | (unresolved.copy(), resolved.copy()), |
|
459 | 461 | ] |
|
460 | 462 | # 'rev' is a merge revision. increment the pending count |
|
461 | # as the 'unresolved' dict will be duplicated | |
|
463 | # as the 'unresolved' dict will be duplicated, and append | |
|
464 | # p1/p2 code to the existing chains. | |
|
462 | 465 | for r in unresolved: |
|
463 | 466 | pendingcnt[r] += 1 |
|
467 | parentpointers[0][0][r] += b'1' | |
|
468 | parentpointers[1][0][r] += b'2' | |
|
464 | 469 | for i, p in enumerate(parentrevs): |
|
465 | 470 | assert p < rev |
|
466 | 471 | heapq.heappush(tovisit, -p) |
|
467 | 472 | if p in pointers: |
|
468 | 473 | # 'p' is a fork revision. concatenate tracking pointers |
|
469 | 474 | # and decrement the pending count accordingly. |
|
470 | 475 | knownunresolved, knownresolved = pointers[p] |
|
471 | 476 | unresolved, resolved = parentpointers[i] |
|
472 | 477 | for r, c in unresolved.items(): |
|
473 | c += [b'1', b'2'][i] | |
|
474 | 478 | if r in knownunresolved: |
|
475 | 479 | # unresolved at both paths |
|
476 | 480 | pendingcnt[r] -= 1 |
|
477 | 481 | assert pendingcnt[r] > 0 |
|
478 | 482 | # take shorter chain |
|
479 | 483 | knownunresolved[r] = min(c, knownunresolved[r]) |
|
480 | 484 | else: |
|
481 | 485 | knownunresolved[r] = c |
|
482 | 486 | # simply propagate the 'resolved' set as deduplicating |
|
483 | 487 | # 'unresolved' here would be slightly complicated. |
|
484 | 488 | knownresolved.update(resolved) |
|
485 | 489 | else: |
|
486 | 490 | pointers[p] = parentpointers[i] |
|
487 | 491 | |
|
488 | 492 | # then, populate the active parents directly and add the current |
|
489 | 493 | # 'rev' to the tracking pointers of the inactive parents. |
|
490 | 494 | # 'pointers[p]' may be optimized out if both parents are active. |
|
495 | chaincodes = [b''] if len(parentrevs) <= 1 else [b'1', b'2'] | |
|
491 | 496 | if curactive and bothparentsactive: |
|
492 | 497 | for i, p in enumerate(parentrevs): |
|
493 | c = [b'1', b'2'][i] | |
|
498 | c = chaincodes[i] | |
|
494 | 499 | parents[rev].append((c, p)) |
|
495 | 500 | # no need to mark 'rev' as resolved since the 'rev' should |
|
496 | 501 | # be fully resolved (i.e. pendingcnt[rev] == 0) |
|
497 | 502 | assert pendingcnt[rev] == 0 |
|
498 | 503 | elif curactive: |
|
499 | 504 | for i, p in enumerate(parentrevs): |
|
500 | 505 | unresolved, resolved = pointers[p] |
|
501 | 506 | assert rev not in unresolved |
|
502 | c = [b'1', b'2'][i] | |
|
507 | c = chaincodes[i] | |
|
503 | 508 | if p in subset: |
|
504 | 509 | parents[rev].append((c, p)) |
|
505 | 510 | # mark 'rev' as resolved through this path |
|
506 | 511 | resolved.add(rev) |
|
507 | 512 | else: |
|
508 | 513 | pendingcnt[rev] += 1 |
|
509 | 514 | unresolved[rev] = c |
|
510 | 515 | assert 0 < pendingcnt[rev] <= 2 |
|
511 | 516 | |
|
512 | 517 | |
|
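A naive quadratic reference for what the walker computes (an assumption-laden sketch, not Mercurial's code, and it ignores the p1/p2 chain codes the real class keeps for sorting): take the first subset member reached along each parent path, then discard any candidate that is an ancestor of another candidate (a redundant path). It reproduces the three scenarios from the class docstring.

```python
def naivesubsetparents(rev, subset, parentrevs):
    def walkdown(r, stopatsubset):
        seen, stack = set(), list(parentrevs(r))
        while stack:
            x = stack.pop()
            if x == -1 or x in seen:
                continue
            seen.add(x)
            if stopatsubset and x in subset:
                continue  # first subset member on this path
            stack.extend(parentrevs(x))
        return seen

    cands = walkdown(rev, True) & subset
    # drop candidates that are ancestors of another candidate
    return {c for c in cands
            if not any(c in walkdown(o, False) for o in cands - {c})}

# Docstring graph with a=0, b=1, c=2, d=3, e=4, f=5 (d's parent is a):
pr = {0: [-1], 1: [0], 2: [1], 3: [0], 4: [3], 5: [2, 4]}.get
assert naivesubsetparents(5, {0, 1, 2, 3, 4, 5}, pr) == {2, 4}
assert naivesubsetparents(5, {0, 1, 3, 4, 5}, pr) == {1, 4}  # 'c' filtered
assert naivesubsetparents(5, {0, 1, 2, 5}, pr) == {2}        # 'd','e' filtered
```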
513 | 518 | def _reachablerootspure(pfunc, minroot, roots, heads, includepath): |
|
514 | 519 | """See revlog.reachableroots""" |
|
515 | 520 | if not roots: |
|
516 | 521 | return [] |
|
517 | 522 | roots = set(roots) |
|
518 | 523 | visit = list(heads) |
|
519 | 524 | reachable = set() |
|
520 | 525 | seen = {} |
|
521 | 526 | # prefetch all the things! (because python is slow) |
|
522 | 527 | reached = reachable.add |
|
523 | 528 | dovisit = visit.append |
|
524 | 529 | nextvisit = visit.pop |
|
525 | 530 | # open-code the post-order traversal due to the tiny size of |
|
526 | 531 | # sys.getrecursionlimit() |
|
527 | 532 | while visit: |
|
528 | 533 | rev = nextvisit() |
|
529 | 534 | if rev in roots: |
|
530 | 535 | reached(rev) |
|
531 | 536 | if not includepath: |
|
532 | 537 | continue |
|
533 | 538 | parents = pfunc(rev) |
|
534 | 539 | seen[rev] = parents |
|
535 | 540 | for parent in parents: |
|
536 | 541 | if parent >= minroot and parent not in seen: |
|
537 | 542 | dovisit(parent) |
|
538 | 543 | if not reachable: |
|
539 | 544 | return baseset() |
|
540 | 545 | if not includepath: |
|
541 | 546 | return reachable |
|
542 | 547 | for rev in sorted(seen): |
|
543 | 548 | for parent in seen[rev]: |
|
544 | 549 | if parent in reachable: |
|
545 | 550 | reached(rev) |
|
546 | 551 | return reachable |
|
547 | 552 | |
|
548 | 553 | |
|
549 | 554 | def reachableroots(repo, roots, heads, includepath=False): |
|
550 | 555 | """See revlog.reachableroots""" |
|
551 | 556 | if not roots: |
|
552 | 557 | return baseset() |
|
553 | 558 | minroot = roots.min() |
|
554 | 559 | roots = list(roots) |
|
555 | 560 | heads = list(heads) |
|
556 | 561 | revs = repo.changelog.reachableroots(minroot, heads, roots, includepath) |
|
557 | 562 | revs = baseset(revs) |
|
558 | 563 | revs.sort() |
|
559 | 564 | return revs |
|
560 | 565 | |
|
561 | 566 | |
|
562 | 567 | def _changesrange(fctx1, fctx2, linerange2, diffopts): |
|
563 | 568 | """Return `(diffinrange, linerange1)` where `diffinrange` is True |
|
564 | 569 | if diff from fctx2 to fctx1 has changes in linerange2 and |
|
565 | 570 | `linerange1` is the new line range for fctx1. |
|
566 | 571 | """ |
|
567 | 572 | blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts) |
|
568 | 573 | filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2) |
|
569 | 574 | diffinrange = any(stype == b'!' for _, stype in filteredblocks) |
|
570 | 575 | return diffinrange, linerange1 |
|
571 | 576 | |
|
572 | 577 | |
|
573 | 578 | def blockancestors(fctx, fromline, toline, followfirst=False): |
|
574 | 579 | """Yield ancestors of `fctx` with respect to the block of lines within |
|
575 | 580 | `fromline`-`toline` range. |
|
576 | 581 | """ |
|
577 | 582 | diffopts = patch.diffopts(fctx._repo.ui) |
|
578 | 583 | fctx = fctx.introfilectx() |
|
579 | 584 | visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))} |
|
580 | 585 | while visit: |
|
581 | 586 | c, linerange2 = visit.pop(max(visit)) |
|
582 | 587 | pl = c.parents() |
|
583 | 588 | if followfirst: |
|
584 | 589 | pl = pl[:1] |
|
585 | 590 | if not pl: |
|
586 | 591 | # The block originates from the initial revision. |
|
587 | 592 | yield c, linerange2 |
|
588 | 593 | continue |
|
589 | 594 | inrange = False |
|
590 | 595 | for p in pl: |
|
591 | 596 | inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts) |
|
592 | 597 | inrange = inrange or inrangep |
|
593 | 598 | if linerange1[0] == linerange1[1]: |
|
594 | 599 | # Parent's linerange is empty, meaning that the block got |
|
595 | 600 | # introduced in this revision; no need to go further in this |
|
596 | 601 | # branch. |
|
597 | 602 | continue |
|
598 | 603 | # Set _descendantrev with 'c' (a known descendant) so that, when |
|
599 | 604 | # _adjustlinkrev is called for 'p', it receives this descendant |
|
600 | 605 | # (as srcrev) instead of the possibly-topmost introrev. |
|
601 | 606 | p._descendantrev = c.rev() |
|
602 | 607 | visit[p.linkrev(), p.filenode()] = p, linerange1 |
|
603 | 608 | if inrange: |
|
604 | 609 | yield c, linerange2 |
|
605 | 610 | |
|
606 | 611 | |
|
607 | 612 | def blockdescendants(fctx, fromline, toline): |
|
608 | 613 | """Yield descendants of `fctx` with respect to the block of lines within |
|
609 | 614 | `fromline`-`toline` range. |
|
610 | 615 | """ |
|
611 | 616 | # First possibly yield 'fctx' if it has changes in range with respect to |
|
612 | 617 | # its parents. |
|
613 | 618 | try: |
|
614 | 619 | c, linerange1 = next(blockancestors(fctx, fromline, toline)) |
|
615 | 620 | except StopIteration: |
|
616 | 621 | pass |
|
617 | 622 | else: |
|
618 | 623 | if c == fctx: |
|
619 | 624 | yield c, linerange1 |
|
620 | 625 | |
|
621 | 626 | diffopts = patch.diffopts(fctx._repo.ui) |
|
622 | 627 | fl = fctx.filelog() |
|
623 | 628 | seen = {fctx.filerev(): (fctx, (fromline, toline))} |
|
624 | 629 | for i in fl.descendants([fctx.filerev()]): |
|
625 | 630 | c = fctx.filectx(i) |
|
626 | 631 | inrange = False |
|
627 | 632 | for x in fl.parentrevs(i): |
|
628 | 633 | try: |
|
629 | 634 | p, linerange2 = seen[x] |
|
630 | 635 | except KeyError: |
|
631 | 636 | # nullrev or other branch |
|
632 | 637 | continue |
|
633 | 638 | inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts) |
|
634 | 639 | inrange = inrange or inrangep |
|
635 | 640 | # If revision 'i' has been seen (it's a merge) and the line range |
|
636 | 641 | # previously computed differs from the one we just got, we take the |
|
637 | 642 | # surrounding interval. This is conservative but avoids losing |
|
638 | 643 | # information. |
|
639 | 644 | if i in seen and seen[i][1] != linerange1: |
|
640 | 645 | lbs, ubs = zip(linerange1, seen[i][1]) |
|
641 | 646 | linerange1 = min(lbs), max(ubs) |
|
642 | 647 | seen[i] = c, linerange1 |
|
643 | 648 | if inrange: |
|
644 | 649 | yield c, linerange1 |
|
645 | 650 | |
|
646 | 651 | |
|
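The surrounding-interval merge performed above when a merge revision is reached twice with different computed line ranges, shown in isolation (hypothetical ranges):

```python
linerange1, previously_seen = (3, 8), (5, 12)
lbs, ubs = zip(linerange1, previously_seen)  # (3, 5) and (8, 12)
assert (min(lbs), max(ubs)) == (3, 12)       # conservative union
```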
647 | 652 | @attr.s(slots=True, frozen=True) |
|
648 | 653 | class annotateline(object): |
|
649 | 654 | fctx = attr.ib() |
|
650 | 655 | lineno = attr.ib() |
|
651 | 656 | # Whether this annotation was the result of a skip-annotate. |
|
652 | 657 | skip = attr.ib(default=False) |
|
653 | 658 | text = attr.ib(default=None) |
|
654 | 659 | |
|
655 | 660 | |
|
656 | 661 | @attr.s(slots=True, frozen=True) |
|
657 | 662 | class _annotatedfile(object): |
|
658 | 663 | # list indexed by lineno - 1 |
|
659 | 664 | fctxs = attr.ib() |
|
660 | 665 | linenos = attr.ib() |
|
661 | 666 | skips = attr.ib() |
|
662 | 667 | # full file content |
|
663 | 668 | text = attr.ib() |
|
664 | 669 | |
|
665 | 670 | |
|
666 | 671 | def _countlines(text): |
|
667 | 672 | if text.endswith(b"\n"): |
|
668 | 673 | return text.count(b"\n") |
|
669 | 674 | return text.count(b"\n") + int(bool(text)) |
|
670 | 675 | |
|
671 | 676 | |
|
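The trailing-newline cases the branch above distinguishes, restated standalone with the same logic:

```python
def countlines(text):
    if text.endswith(b"\n"):
        return text.count(b"\n")
    return text.count(b"\n") + int(bool(text))

assert countlines(b"") == 0
assert countlines(b"a\nb\n") == 2  # newline-terminated: count b"\n"
assert countlines(b"a\nb") == 2    # unterminated last line still counts
```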
672 | 677 | def _decoratelines(text, fctx): |
|
673 | 678 | n = _countlines(text) |
|
674 | 679 | linenos = pycompat.rangelist(1, n + 1) |
|
675 | 680 | return _annotatedfile([fctx] * n, linenos, [False] * n, text) |
|
676 | 681 | |
|
677 | 682 | |
|
678 | 683 | def _annotatepair(parents, childfctx, child, skipchild, diffopts): |
|
679 | 684 | r''' |
|
680 | 685 | Given parent and child fctxes and annotate data for parents, for all lines |
|
681 | 686 | in either parent that match the child, annotate the child with the parent's |
|
682 | 687 | data. |
|
683 | 688 | |
|
684 | 689 | Additionally, if `skipchild` is True, replace all other lines with parent |
|
685 | 690 | annotate data as well such that child is never blamed for any lines. |
|
686 | 691 | |
|
687 | 692 | See test-annotate.py for unit tests. |
|
688 | 693 | ''' |
|
689 | 694 | pblocks = [ |
|
690 | 695 | (parent, mdiff.allblocks(parent.text, child.text, opts=diffopts)) |
|
691 | 696 | for parent in parents |
|
692 | 697 | ] |
|
693 | 698 | |
|
694 | 699 | if skipchild: |
|
695 | 700 | # Need to iterate over the blocks twice -- make it a list |
|
696 | 701 | pblocks = [(p, list(blocks)) for (p, blocks) in pblocks] |
|
697 | 702 | # Mercurial currently prefers p2 over p1 for annotate. |
|
698 | 703 | # TODO: change this? |
|
699 | 704 | for parent, blocks in pblocks: |
|
700 | 705 | for (a1, a2, b1, b2), t in blocks: |
|
701 | 706 | # Changed blocks ('!') or blocks made only of blank lines ('~') |
|
702 | 707 | # belong to the child. |
|
703 | 708 | if t == b'=': |
|
704 | 709 | child.fctxs[b1:b2] = parent.fctxs[a1:a2] |
|
705 | 710 | child.linenos[b1:b2] = parent.linenos[a1:a2] |
|
706 | 711 | child.skips[b1:b2] = parent.skips[a1:a2] |
|
707 | 712 | |
|
708 | 713 | if skipchild: |
|
709 | 714 | # Now try and match up anything that couldn't be matched, |
|
710 | 715 | # Reversing pblocks maintains bias towards p2, matching above |
|
711 | 716 | # behavior. |
|
712 | 717 | pblocks.reverse() |
|
713 | 718 | |
|
714 | 719 | # The heuristics are: |
|
715 | 720 | # * Work on blocks of changed lines (effectively diff hunks with -U0). |
|
716 | 721 | # This could potentially be smarter but works well enough. |
|
717 | 722 | # * For a non-matching section, do a best-effort fit. Match lines in |
|
718 | 723 | # diff hunks 1:1, dropping lines as necessary. |
|
719 | 724 | # * Repeat the last line as a last resort. |
|
720 | 725 | |
|
721 | 726 | # First, replace as much as possible without repeating the last line. |
|
722 | 727 | remaining = [(parent, []) for parent, _blocks in pblocks] |
|
723 | 728 | for idx, (parent, blocks) in enumerate(pblocks): |
|
724 | 729 | for (a1, a2, b1, b2), _t in blocks: |
|
725 | 730 | if a2 - a1 >= b2 - b1: |
|
726 | 731 | for bk in pycompat.xrange(b1, b2): |
|
727 | 732 | if child.fctxs[bk] == childfctx: |
|
728 | 733 | ak = min(a1 + (bk - b1), a2 - 1) |
|
729 | 734 | child.fctxs[bk] = parent.fctxs[ak] |
|
730 | 735 | child.linenos[bk] = parent.linenos[ak] |
|
731 | 736 | child.skips[bk] = True |
|
732 | 737 | else: |
|
733 | 738 | remaining[idx][1].append((a1, a2, b1, b2)) |
|
734 | 739 | |
|
735 | 740 | # Then, look at anything left, which might involve repeating the last |
|
736 | 741 | # line. |
|
737 | 742 | for parent, blocks in remaining: |
|
738 | 743 | for a1, a2, b1, b2 in blocks: |
|
739 | 744 | for bk in pycompat.xrange(b1, b2): |
|
740 | 745 | if child.fctxs[bk] == childfctx: |
|
741 | 746 | ak = min(a1 + (bk - b1), a2 - 1) |
|
742 | 747 | child.fctxs[bk] = parent.fctxs[ak] |
|
743 | 748 | child.linenos[bk] = parent.linenos[ak] |
|
744 | 749 | child.skips[bk] = True |
|
745 | 750 | return child |
|
746 | 751 | |
|
747 | 752 | |
|
748 | 753 | def annotate(base, parents, skiprevs=None, diffopts=None): |
|
749 | 754 | """Core algorithm for filectx.annotate() |
|
750 | 755 | |
|
751 | 756 | `parents(fctx)` is a function returning a list of parent filectxs. |
|
752 | 757 | """ |
|
753 | 758 | |
|
754 | 759 | # This algorithm would prefer to be recursive, but Python is a |
|
755 | 760 | # bit recursion-hostile. Instead we do an iterative |
|
756 | 761 | # depth-first search. |
|
757 | 762 | |
|
758 | 763 | # 1st DFS pre-calculates pcache and needed |
|
759 | 764 | visit = [base] |
|
760 | 765 | pcache = {} |
|
761 | 766 | needed = {base: 1} |
|
762 | 767 | while visit: |
|
763 | 768 | f = visit.pop() |
|
764 | 769 | if f in pcache: |
|
765 | 770 | continue |
|
766 | 771 | pl = parents(f) |
|
767 | 772 | pcache[f] = pl |
|
768 | 773 | for p in pl: |
|
769 | 774 | needed[p] = needed.get(p, 0) + 1 |
|
770 | 775 | if p not in pcache: |
|
771 | 776 | visit.append(p) |
|
772 | 777 | |
|
773 | 778 | # 2nd DFS does the actual annotate |
|
774 | 779 | visit[:] = [base] |
|
775 | 780 | hist = {} |
|
776 | 781 | while visit: |
|
777 | 782 | f = visit[-1] |
|
778 | 783 | if f in hist: |
|
779 | 784 | visit.pop() |
|
780 | 785 | continue |
|
781 | 786 | |
|
782 | 787 | ready = True |
|
783 | 788 | pl = pcache[f] |
|
784 | 789 | for p in pl: |
|
785 | 790 | if p not in hist: |
|
786 | 791 | ready = False |
|
787 | 792 | visit.append(p) |
|
788 | 793 | if ready: |
|
789 | 794 | visit.pop() |
|
790 | 795 | curr = _decoratelines(f.data(), f) |
|
791 | 796 | skipchild = False |
|
792 | 797 | if skiprevs is not None: |
|
793 | 798 | skipchild = f._changeid in skiprevs |
|
794 | 799 | curr = _annotatepair( |
|
795 | 800 | [hist[p] for p in pl], f, curr, skipchild, diffopts |
|
796 | 801 | ) |
|
797 | 802 | for p in pl: |
|
798 | 803 | if needed[p] == 1: |
|
799 | 804 | del hist[p] |
|
800 | 805 | del needed[p] |
|
801 | 806 | else: |
|
802 | 807 | needed[p] -= 1 |
|
803 | 808 | |
|
804 | 809 | hist[f] = curr |
|
805 | 810 | del pcache[f] |
|
806 | 811 | |
|
807 | 812 | a = hist[base] |
|
808 | 813 | return [ |
|
809 | 814 | annotateline(*r) |
|
810 | 815 | for r in zip(a.fctxs, a.linenos, a.skips, mdiff.splitnewlines(a.text)) |
|
811 | 816 | ] |
|
812 | 817 | |
|
813 | 818 | |
|
814 | 819 | def toposort(revs, parentsfunc, firstbranch=()): |
|
815 | 820 | """Yield revisions from heads to roots one (topo) branch at a time. |
|
816 | 821 | |
|
817 | 822 | This function aims to be used by a graph generator that wishes to minimize |
|
818 | 823 | the number of parallel branches and their interleaving. |
|
819 | 824 | |
|
820 | 825 | Example iteration order (numbers show the "true" order in a changelog): |
|
821 | 826 | |
|
822 | 827 | o 4 |
|
823 | 828 | | |
|
824 | 829 | o 1 |
|
825 | 830 | | |
|
826 | 831 | | o 3 |
|
827 | 832 | | | |
|
828 | 833 | | o 2 |
|
829 | 834 | |/ |
|
830 | 835 | o 0 |
|
831 | 836 | |
|
832 | 837 | Note that the ancestors of merges are understood by the current |
|
833 | 838 | algorithm to be on the same branch. This means no reordering will |
|
834 | 839 | occur behind a merge. |
|
835 | 840 | """ |
|
836 | 841 | |
|
837 | 842 | ### Quick summary of the algorithm |
|
838 | 843 | # |
|
839 | 844 | # This function is based around a "retention" principle. We keep revisions |
|
840 | 845 | # in memory until we are ready to emit a whole branch that immediately |
|
841 | 846 | # "merges" into an existing one. This reduces the number of parallel |
|
842 | 847 | # branches with interleaved revisions. |
|
843 | 848 | # |
|
844 | 849 | # During iteration revs are split into two groups: |
|
845 | 850 | # A) revision already emitted |
|
846 | 851 | # B) revision in "retention". They are stored as different subgroups. |
|
847 | 852 | # |
|
848 | 853 | # for each REV, we do the following logic: |
|
849 | 854 | # |
|
850 | 855 | # 1) if REV is a parent of (A), we will emit it. If there is a |
|
851 | 856 | # retention group ((B) above) that is blocked on REV being |
|
852 | 857 | # available, we emit all the revisions out of that retention |
|
853 | 858 | # group first. |
|
854 | 859 | # |
|
855 | 860 | # 2) else, we'll search for a subgroup in (B) awaiting for REV to be |
|
856 | 861 | # available, if such a subgroup exists, we add REV to it and the subgroup is |
|
857 | 862 | # now awaiting for REV.parents() to be available. |
|
858 | 863 | # |
|
859 | 864 | # 3) finally if no such group existed in (B), we create a new subgroup. |
|
860 | 865 | # |
|
861 | 866 | # |
|
862 | 867 | # To bootstrap the algorithm, we emit the tipmost revision (which |
|
863 | 868 | # puts it in group (A) from above). |
|
864 | 869 | |
|
865 | 870 | revs.sort(reverse=True) |
|
866 | 871 | |
|
867 | 872 | # Set of parents of revision that have been emitted. They can be considered |
|
868 | 873 | # unblocked as the graph generator is already aware of them so there is no |
|
869 | 874 | # need to delay the revisions that reference them. |
|
870 | 875 | # |
|
871 | 876 | # If someone wants to prioritize a branch over the others, pre-filling this |
|
872 | 877 | # set will force all other branches to wait until this branch is ready to be |
|
873 | 878 | # emitted. |
|
874 | 879 | unblocked = set(firstbranch) |
|
875 | 880 | |
|
876 | 881 | # list of groups waiting to be displayed, each group is defined by: |
|
877 | 882 | # |
|
878 | 883 | # (revs: lists of revs waiting to be displayed, |
|
879 | 884 | # blocked: set of revs that cannot be displayed before those in 'revs') |
|
880 | 885 | # |
|
881 | 886 | # The second value ('blocked') corresponds to parents of any revision in the |
|
882 | 887 | # group ('revs') that is not itself contained in the group. The main idea |
|
883 | 888 | # of this algorithm is to delay as much as possible the emission of any |
|
884 | 889 | # revision. This means waiting for the moment we are about to display |
|
885 | 890 | # these parents to display the revs in a group. |
|
886 | 891 | # |
|
887 | 892 | # This first implementation is smart until it encounters a merge: it will |
|
888 | 893 | # emit revs as soon as any parent is about to be emitted and can grow an |
|
889 | 894 | # arbitrary number of revs in 'blocked'. In practice this means we properly |
|
890 | 895 | # retain new branches but give up on any special ordering for ancestors |
|
891 | 896 | # of merges. The implementation can be improved to handle this better. |
|
892 | 897 | # |
|
893 | 898 | # The first subgroup is special. It corresponds to all the revision that |
|
894 | 899 | # were already emitted. The 'revs' lists is expected to be empty and the |
|
895 | 900 | # 'blocked' set contains the parents revisions of already emitted revision. |
|
896 | 901 | # |
|
897 | 902 | # You could pre-seed the <parents> set of groups[0] with specific |
|
898 | 903 | # changesets to select what the first emitted branch should be. |
|
899 | 904 | groups = [([], unblocked)] |
|
900 | 905 | pendingheap = [] |
|
901 | 906 | pendingset = set() |
|
902 | 907 | |
|
903 | 908 | heapq.heapify(pendingheap) |
|
904 | 909 | heappop = heapq.heappop |
|
905 | 910 | heappush = heapq.heappush |
|
906 | 911 | for currentrev in revs: |
|
907 | 912 | # Heap works with smallest element, we want highest so we invert |
|
908 | 913 | if currentrev not in pendingset: |
|
909 | 914 | heappush(pendingheap, -currentrev) |
|
910 | 915 | pendingset.add(currentrev) |
|
911 | 916 | # iterates on pending revs until after the current rev has been |
|
912 | 917 | # processed. |
|
913 | 918 | rev = None |
|
914 | 919 | while rev != currentrev: |
|
915 | 920 | rev = -heappop(pendingheap) |
|
916 | 921 | pendingset.remove(rev) |
|
917 | 922 | |
|
918 | 923 | # Seek for a subgroup blocked, waiting for the current revision. |
|
919 | 924 | matching = [i for i, g in enumerate(groups) if rev in g[1]] |
|
920 | 925 | |
|
921 | 926 | if matching: |
|
922 | 927 | # The main idea is to gather together all sets that are blocked |
|
923 | 928 | # on the same revision. |
|
924 | 929 | # |
|
925 | 930 | # Groups are merged when a common blocking ancestor is |
|
926 | 931 | # observed. For example, given two groups: |
|
927 | 932 | # |
|
928 | 933 | # revs [5, 4] waiting for 1 |
|
929 | 934 | # revs [3, 2] waiting for 1 |
|
930 | 935 | # |
|
931 | 936 | # These two groups will be merged when we process |
|
932 | 937 | # 1. In theory, we could have merged the groups when |
|
933 | 938 | # we added 2 to the group it is now in (we could have |
|
934 | 939 | # noticed the groups were both blocked on 1 then), but |
|
935 | 940 | # the way it works now makes the algorithm simpler. |
|
936 | 941 | # |
|
937 | 942 | # We also always keep the oldest subgroup first. We can |
|
938 | 943 | # probably improve the behavior by having the longest set |
|
939 | 944 | # first. That way, graph algorithms could minimise the length |
|
940 | 945 | # of parallel lines in their drawing. This is currently not done. |
|
941 | 946 | targetidx = matching.pop(0) |
|
942 | 947 | trevs, tparents = groups[targetidx] |
|
943 | 948 | for i in matching: |
|
944 | 949 | gr = groups[i] |
|
945 | 950 | trevs.extend(gr[0]) |
|
946 | 951 | tparents |= gr[1] |
|
947 | 952 | # delete all merged subgroups (except the one we kept) |
|
948 | 953 | # (starting from the last subgroup for performance and |
|
949 | 954 | # sanity reasons) |
|
950 | 955 | for i in reversed(matching): |
|
951 | 956 | del groups[i] |
|
952 | 957 | else: |
|
953 | 958 | # This is a new head. We create a new subgroup for it. |
|
954 | 959 | targetidx = len(groups) |
|
955 | 960 | groups.append(([], {rev})) |
|
956 | 961 | |
|
957 | 962 | gr = groups[targetidx] |
|
958 | 963 | |
|
959 | 964 | # We now add the current nodes to this subgroup. This is done |
|
960 | 965 | # after the subgroup merging because all elements from a subgroup |
|
961 | 966 | # that relied on this rev must precede it. |
|
962 | 967 | # |
|
963 | 968 | # we also update the <parents> set to include the parents of the |
|
964 | 969 | # new nodes. |
|
965 | 970 | if rev == currentrev: # only display stuff in rev |
|
966 | 971 | gr[0].append(rev) |
|
967 | 972 | gr[1].remove(rev) |
|
968 | 973 | parents = [p for p in parentsfunc(rev) if p > node.nullrev] |
|
969 | 974 | gr[1].update(parents) |
|
970 | 975 | for p in parents: |
|
971 | 976 | if p not in pendingset: |
|
972 | 977 | pendingset.add(p) |
|
973 | 978 | heappush(pendingheap, -p) |
|
974 | 979 | |
|
975 | 980 | # Look for a subgroup to display |
|
976 | 981 | # |
|
977 | 982 | # When unblocked is empty (if clause), we were not waiting for any |
|
978 | 983 | # revisions during the first iteration (if no priority was given) or |
|
979 | 984 | # if we emitted a whole disconnected set of the graph (reached a |
|
980 | 985 | # root). In that case we arbitrarily take the oldest known |
|
981 | 986 | # subgroup. The heuristic could probably be better. |
|
982 | 987 | # |
|
983 | 988 | # Otherwise (elif clause) if the subgroup is blocked on |
|
984 | 989 | # a revision we just emitted, we can safely emit it as |
|
985 | 990 | # well. |
|
986 | 991 | if not unblocked: |
|
987 | 992 | if len(groups) > 1: # display other subset |
|
988 | 993 | targetidx = 1 |
|
989 | 994 | gr = groups[1] |
|
990 | 995 | elif not gr[1] & unblocked: |
|
991 | 996 | gr = None |
|
992 | 997 | |
|
993 | 998 | if gr is not None: |
|
994 | 999 | # update the set of awaited revisions with the one from the |
|
995 | 1000 | # subgroup |
|
996 | 1001 | unblocked |= gr[1] |
|
997 | 1002 | # output all revisions in the subgroup |
|
998 | 1003 | for r in gr[0]: |
|
999 | 1004 | yield r |
|
1000 | 1005 | # delete the subgroup that you just output |
|
1001 | 1006 | # unless it is groups[0] in which case you just empty it. |
|
1002 | 1007 | if targetidx: |
|
1003 | 1008 | del groups[targetidx] |
|
1004 | 1009 | else: |
|
1005 | 1010 | gr[0][:] = [] |
|
1006 | 1011 | # Check if we have some subgroup waiting for revisions we are not going to |
|
1007 | 1012 | # iterate over |
|
1008 | 1013 | for g in groups: |
|
1009 | 1014 | for r in g[0]: |
|
1010 | 1015 | yield r |
|
1011 | 1016 | |
|
1012 | 1017 | |
|
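Usage sketch for the docstring graph above, assuming `toposort` as defined here (the parent data is made up to match the drawing: 4->1->0 and 3->2->0). Branches come out grouped instead of interleaved by revision number:

```python
parentsfunc = {0: [-1], 1: [0], 2: [0], 3: [2], 4: [1]}.get
assert list(toposort([0, 1, 2, 3, 4], parentsfunc)) == [4, 1, 3, 2, 0]
```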
1013 | 1018 | def headrevs(revs, parentsfn): |
|
1014 | 1019 | """Resolve the set of heads from a set of revisions. |
|
1015 | 1020 | |
|
1016 | 1021 | Receives an iterable of revision numbers and a callable that receives a |
|
1017 | 1022 | revision number and returns an iterable of parent revision numbers, possibly |
|
1018 | 1023 | including nullrev. |
|
1019 | 1024 | |
|
1020 | 1025 | Returns a set of revision numbers that are DAG heads within the passed |
|
1021 | 1026 | subset. |
|
1022 | 1027 | |
|
1023 | 1028 | ``nullrev`` is never included in the returned set, even if it is provided in |
|
1024 | 1029 | the input set. |
|
1025 | 1030 | """ |
|
1026 | 1031 | headrevs = set(revs) |
|
1027 | 1032 | parents = {node.nullrev} |
|
1028 | 1033 | up = parents.update |
|
1029 | 1034 | |
|
1030 | 1035 | for rev in revs: |
|
1031 | 1036 | up(parentsfn(rev)) |
|
1032 | 1037 | headrevs.difference_update(parents) |
|
1033 | 1038 | return headrevs |
|
1034 | 1039 | |
|
1035 | 1040 | |
|
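A condensed standalone restatement of the same head computation, with toy data: revs 2 and 3 are nobody's parent, so only they remain.

```python
def heads(revs, parentsfn):
    hs = set(revs)
    parents = {-1}  # nullrev never counts as a head
    for rev in revs:
        parents.update(parentsfn(rev))
    hs.difference_update(parents)
    return hs

pf = {0: [-1], 1: [0], 2: [1], 3: [1]}.get
assert heads([0, 1, 2, 3], pf) == {2, 3}
```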
1036 | 1041 | def headrevssubset(revsfn, parentrevsfn, startrev=None, stoprevs=None): |
|
1037 | 1042 | """Returns the set of all revs that have no children, with traversal control. |
|
1038 | 1043 | |
|
1039 | 1044 | ``revsfn`` is a callable that with no arguments returns an iterator over |
|
1040 | 1045 | all revision numbers in topological order. With a ``start`` argument, it |
|
1041 | 1046 | returns revision numbers starting at that number. |
|
1042 | 1047 | |
|
1043 | 1048 | ``parentrevsfn`` is a callable receiving a revision number and returns an |
|
1044 | 1049 | iterable of parent revision numbers, where values can include nullrev. |
|
1045 | 1050 | |
|
1046 | 1051 | ``startrev`` is a revision number at which to start the search. |
|
1047 | 1052 | |
|
1048 | 1053 | ``stoprevs`` is an iterable of revision numbers that, when encountered, |
|
1049 | 1054 | will stop DAG traversal beyond them. Parents of revisions in this |
|
1050 | 1055 | collection will be heads. |
|
1051 | 1056 | """ |
|
1052 | 1057 | if startrev is None: |
|
1053 | 1058 | startrev = nullrev |
|
1054 | 1059 | |
|
1055 | 1060 | stoprevs = set(stoprevs or []) |
|
1056 | 1061 | |
|
1057 | 1062 | reachable = {startrev} |
|
1058 | 1063 | heads = {startrev} |
|
1059 | 1064 | |
|
1060 | 1065 | for rev in revsfn(start=startrev + 1): |
|
1061 | 1066 | for prev in parentrevsfn(rev): |
|
1062 | 1067 | if prev in reachable: |
|
1063 | 1068 | if rev not in stoprevs: |
|
1064 | 1069 | reachable.add(rev) |
|
1065 | 1070 | heads.add(rev) |
|
1066 | 1071 | |
|
1067 | 1072 | if prev in heads and prev not in stoprevs: |
|
1068 | 1073 | heads.remove(prev) |
|
1069 | 1074 | |
|
1070 | 1075 | return heads |
|
1071 | 1076 | |
|
1072 | 1077 | |
|
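The stoprevs behavior in isolation (same scan as above, restated standalone over a toy linear history 0 <- 1 <- 2): a stoprev still becomes a head, but the traversal does not continue past it.

```python
def headssubset(revsfn, parentrevsfn, startrev, stoprevs):
    stoprevs = set(stoprevs)
    reachable, heads = {startrev}, {startrev}
    for rev in revsfn(start=startrev + 1):
        for prev in parentrevsfn(rev):
            if prev in reachable:
                if rev not in stoprevs:
                    reachable.add(rev)
                heads.add(rev)
            if prev in heads and prev not in stoprevs:
                heads.remove(prev)
    return heads

revsfn = lambda start=0: range(start, 3)
parentrevsfn = {0: [-1], 1: [0], 2: [1]}.get
assert headssubset(revsfn, parentrevsfn, 0, [1]) == {1}
```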
1073 | 1078 | def linearize(revs, parentsfn): |
|
1074 | 1079 | """Linearize and topologically sort a list of revisions. |
|
1075 | 1080 | |
|
1076 | 1081 | The linearization process tries to create long runs of revs where a child |
|
1077 | 1082 | rev comes immediately after its first parent. This is done by visiting the |
|
1078 | 1083 | heads of the revs in inverse topological order, and for each visited rev, |
|
1079 | 1084 | visiting its second parent, then its first parent, then adding the rev |
|
1080 | 1085 | itself to the output list. |
|
1081 | 1086 | |
|
1082 | 1087 | Returns a list of revision numbers. |
|
1083 | 1088 | """ |
|
1084 | 1089 | visit = list(sorted(headrevs(revs, parentsfn), reverse=True)) |
|
1085 | 1090 | finished = set() |
|
1086 | 1091 | result = [] |
|
1087 | 1092 | |
|
1088 | 1093 | while visit: |
|
1089 | 1094 | rev = visit.pop() |
|
1090 | 1095 | if rev < 0: |
|
1091 | 1096 | rev = -rev - 1 |
|
1092 | 1097 | |
|
1093 | 1098 | if rev not in finished: |
|
1094 | 1099 | result.append(rev) |
|
1095 | 1100 | finished.add(rev) |
|
1096 | 1101 | |
|
1097 | 1102 | else: |
|
1098 | 1103 | visit.append(-rev - 1) |
|
1099 | 1104 | |
|
1100 | 1105 | for prev in parentsfn(rev): |
|
1101 | 1106 | if prev == node.nullrev or prev not in revs or prev in finished: |
|
1102 | 1107 | continue |
|
1103 | 1108 | |
|
1104 | 1109 | visit.append(prev) |
|
1105 | 1110 | |
|
1106 | 1111 | assert len(result) == len(revs) |
|
1107 | 1112 | |
|
1108 | 1113 | return result |
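Usage sketch, assuming `linearize` as defined above (made-up parent data: 1->0, 2->0, 3->1). Plain revision order would give [0, 1, 2, 3], interleaving the two branches; linearize keeps the 1-3 run contiguous:

```python
pf = {0: [-1], 1: [0], 2: [0], 3: [1]}.get
assert linearize({0, 1, 2, 3}, pf) == [0, 2, 1, 3]
```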
@@ -1,337 +1,440 @@
|
1 | 1 | Test graph-related template functions |
|
2 | 2 | ===================================== |
|
3 | 3 | |
|
4 | 4 | $ cat <<'EOF' >> $HGRCPATH |
|
5 | 5 | > [extensions] |
|
6 | 6 | > drawdag = $RUNTESTDIR/drawdag.py |
|
7 | 7 | > EOF |
|
8 | 8 | |
|
9 | 9 | $ hg init a |
|
10 | 10 | $ cd a |
|
11 | 11 | |
|
12 | 12 | $ hg debugdrawdag <<'EOF' |
|
13 | 13 | > l |
|
14 | 14 | > / \ |
|
15 | 15 | > | k |
|
16 | 16 | > | |\ |
|
17 | 17 | > | | j |
|
18 | 18 | > | | | |
|
19 | 19 | > i | | |
|
20 | 20 | > |\ | | |
|
21 | 21 | > h | | | |
|
22 | 22 | > | | | | |
|
23 | 23 | > | g | | |
|
24 | 24 | > | | | | |
|
25 | 25 | > f | | | |
|
26 | 26 | > | |/ / |
|
27 | 27 | > | e | |
|
28 | 28 | > | |/ |
|
29 | 29 | > | d |
|
30 | 30 | > |/| |
|
31 | 31 | > c | |
|
32 | 32 | > | | |
|
33 | 33 | > b | |
|
34 | 34 | > | |
|
35 | 35 | > a |
|
36 | 36 | > EOF |
|
37 | 37 | |
|
38 | 38 | $ hg log -Gq -T'{rev} {tags}\n' |
|
39 | 39 | o 11 l tip |
|
40 | 40 | |\ |
|
41 | 41 | | o 10 i |
|
42 | 42 | | |\ |
|
43 | 43 | o \ \ 9 k |
|
44 | 44 | |\ \ \ |
|
45 | 45 | +-----o 8 g |
|
46 | 46 | | | | |
|
47 | 47 | | o | 7 j |
|
48 | 48 | | | | |
|
49 | 49 | | | o 6 h |
|
50 | 50 | | | | |
|
51 | 51 | o | | 5 e |
|
52 | 52 | |/ / |
|
53 | 53 | | o 4 f |
|
54 | 54 | | | |
|
55 | 55 | o | 3 d |
|
56 | 56 | |\| |
|
57 | 57 | | o 2 c |
|
58 | 58 | | | |
|
59 | 59 | | o 1 b |
|
60 | 60 | | |
|
61 | 61 | o 0 a |
|
62 | 62 | |
|
63 | 63 | |
|
64 | 64 | $ cd .. |
|
65 | 65 | |
|
66 | Create repository containing merges of p1 > p2: | |
|
67 | ||
|
68 | $ hg init named-branch-order | |
|
69 | $ cd named-branch-order | |
|
70 | ||
|
71 | $ hg branch -q b0 | |
|
72 | $ hg ci -m 0 | |
|
73 | $ hg up -q null | |
|
74 | $ hg branch -q b1 | |
|
75 | $ hg ci -m 1 | |
|
76 | $ hg up -q null | |
|
77 | $ hg branch -q b2 | |
|
78 | $ hg ci -m 2 | |
|
79 | $ hg merge -q 1 | |
|
80 | $ hg ci -m 3 | |
|
81 | $ hg ci -m 4 --config ui.allowemptycommit=true | |
|
82 | $ hg merge -q 0 | |
|
83 | $ hg ci -m 5 | |
|
84 | $ hg ci -m 6 --config ui.allowemptycommit=true | |
|
85 | $ hg up -q 1 | |
|
86 | $ hg branch -q b7 | |
|
87 | $ hg ci -m 7 | |
|
88 | $ hg ci -m 8 --config ui.allowemptycommit=true | |
|
89 | $ hg up -q 6 | |
|
90 | $ hg ci -m 9 --config ui.allowemptycommit=true | |
|
91 | $ hg up -q 8 | |
|
92 | $ hg merge -q 9 | |
|
93 | $ hg ci -m 10 | |
|
94 | ||
|
95 | $ hg log -Gq -T'{rev} {branch} -> {p1.rev} {p2.rev}\n' | |
|
96 | @ 10 b7 -> 8 9 | |
|
97 | |\ | |
|
98 | | o 9 b2 -> 6 -1 | |
|
99 | | | | |
|
100 | o | 8 b7 -> 7 -1 | |
|
101 | | | | |
|
102 | o | 7 b7 -> 1 -1 | |
|
103 | | | | |
|
104 | | o 6 b2 -> 5 -1 | |
|
105 | | | | |
|
106 | | o 5 b2 -> 4 0 | |
|
107 | | |\ | |
|
108 | | | o 4 b2 -> 3 -1 | |
|
109 | | | | | |
|
110 | +---o 3 b2 -> 2 1 | |
|
111 | | | | | |
|
112 | | | o 2 b2 -> -1 -1 | |
|
113 | | | | |
|
114 | o | 1 b1 -> -1 -1 | |
|
115 | / | |
|
116 | o 0 b0 -> -1 -1 | |
|
117 | ||
|
118 | ||
|
119 | $ cd .. | |
|
120 | ||
|
66 | 121 | subsetparents |
|
67 | 122 | ------------- |
|
68 | 123 | |
|
69 | 124 | $ cd a |
|
70 | 125 | |
|
71 | 126 | $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+i"))}\n' -r 'c+i' |
|
72 | 127 | o 10 i: 2 |
|
73 | 128 | : |
|
74 | 129 | o 2 c: |
|
75 | 130 | | |
|
76 | 131 | ~ |
|
77 | 132 | |
|
78 | 133 | $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+h+i"))}\n' -r 'c+h+i' |
|
79 | 134 | o 10 i: 6 |
|
80 | 135 | |\ |
|
81 | 136 | o : 6 h: 2 |
|
82 | 137 | :/ |
|
83 | 138 | o 2 c: |
|
84 | 139 | | |
|
85 | 140 | ~ |
|
86 | 141 | |
|
87 | 142 | $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+h+l"))}\n' -r 'c+h+l' |
|
88 | 143 | o 11 l tip: 6 |
|
89 | 144 | :\ |
|
90 | 145 | : o 6 h: 2 |
|
91 | 146 | :/ |
|
92 | 147 | o 2 c: |
|
93 | 148 | | |
|
94 | 149 | ~ |
|
95 | 150 | |
|
96 | 151 | $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+f+l"))}\n' -r 'c+f+l' |
|
97 | 152 | o 11 l tip: 4 |
|
98 | 153 | :\ |
|
99 | 154 | : o 4 f: 2 |
|
100 | 155 | :/ |
|
101 | 156 | o 2 c: |
|
102 | 157 | | |
|
103 | 158 | ~ |
|
104 | 159 | |
|
105 | 160 | $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+h+i+k"))}\n' -r 'c+h+i+k' |
|
106 | 161 | o 10 i: 6 |
|
107 | 162 | |\ |
|
108 | 163 | | : o 9 k: 2 |
|
109 | 164 | | :/ |
|
110 | 165 | o : 6 h: 2 |
|
111 | 166 | :/ |
|
112 | 167 | o 2 c: |
|
113 | 168 | | |
|
114 | 169 | ~ |
|
115 | 170 | |
|
116 | 171 | $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+d+h+i+k"))}\n' -r 'c+d+h+i+k' |
|
117 | 172 | o 10 i: 6 3 |
|
118 | 173 | |\ |
|
119 | 174 | | : o 9 k: 3 |
|
120 | 175 | | :/ |
|
121 | 176 | o : 6 h: 2 |
|
122 | 177 | : : |
|
123 | 178 | : o 3 d: 2 |
|
124 | 179 | :/| |
|
125 | 180 | : ~ |
|
126 | 181 | o 2 c: |
|
127 | 182 | | |
|
128 | 183 | ~ |
|
129 | 184 | |
|
130 | 185 | $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+j+k+i"))}\n' -r 'c+j+k+i' |
|
131 | 186 | o 10 i: 2 |
|
132 | 187 | : |
|
133 | 188 | : o 9 k: 7 |
|
134 | 189 | :/| |
|
135 | 190 | : o 7 j: 2 |
|
136 | 191 | :/ |
|
137 | 192 | o 2 c: |
|
138 | 193 | | |
|
139 | 194 | ~ |
|
140 | 195 | |
|
141 | 196 | $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("c+e+f+j"))}\n' -r 'c+e+f+j' |
|
142 | 197 | o 7 j: 2 |
|
143 | 198 | : |
|
144 | 199 | : o 5 e: 2 |
|
145 | 200 | :/ |
|
146 | 201 | : o 4 f: 2 |
|
147 | 202 | :/ |
|
148 | 203 | o 2 c: |
|
149 | 204 | | |
|
150 | 205 | ~ |
|
151 | 206 | |
|
152 | 207 | $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("b+e+f+j"))}\n' -r 'b+e+f+j' |
|
153 | 208 | o 7 j: 1 |
|
154 | 209 | : |
|
155 | 210 | : o 5 e: 1 |
|
156 | 211 | :/ |
|
157 | 212 | : o 4 f: 1 |
|
158 | 213 | :/ |
|
159 | 214 | o 1 b: |
|
160 | 215 | |
|
161 | 216 | |
|
162 | 217 | $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("a+c+f+g+j+l"))}\n' -r 'a+c+f+g+j+l' |
|
163 | 218 | o 11 l tip: 4 8 7 |
|
164 | 219 | :\ |
|
165 | 220 | : \ |
|
166 | 221 | : :\ |
|
167 | 222 | : : \ |
|
168 | 223 | : : :\ |
|
169 | 224 | : : : \ |
|
170 | 225 | : : : :\ |
|
171 | 226 | : o---+ : 8 g: 0 2 |
|
172 | 227 | : :/ / / |
|
173 | 228 | : +---o 7 j: 0 2 |
|
174 | 229 | : : :/ |
|
175 | 230 | o---+ 4 f: 2 |
|
176 | 231 | / / |
|
177 | 232 | : o 2 c: |
|
178 | 233 | : | |
|
179 | 234 | : ~ |
|
180 | 235 | o 0 a: |
|
181 | 236 | |
|
182 | 237 | |
|
183 | 238 | $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("b+i+l"))}\n' -r 'b+i+l' |
|
184 | 239 | o 11 l tip: 10 |
|
185 | 240 | |\ |
|
186 | 241 | o : 10 i: 1 |
|
187 | 242 | :/ |
|
188 | 243 | o 1 b: |
|
189 | 244 | |
|
190 | 245 | |
|
191 | 246 | $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("b+i+j+l"))}\n' -r 'b+i+j+l' |
|
192 | 247 | o 11 l tip: 10 7 |
|
193 | 248 | |\ |
|
194 | 249 | | \ |
|
195 | 250 | | :\ |
|
196 | 251 | o : : 10 i: 1 |
|
197 | 252 | :/ / |
|
198 | 253 | : o 7 j: 1 |
|
199 | 254 | :/ |
|
200 | 255 | o 1 b: |
|
201 | 256 | |
|
202 | 257 | |
|
203 | 258 | null in subset: |
|
204 | 259 | |
|
205 | 260 | $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("null+a+c+f"))}\n' -r 'null+a+c+f' |
|
206 | 261 | o 4 f: 2 |
|
207 | 262 | | |
|
208 | 263 | o 2 c: -1 |
|
209 | 264 | : |
|
210 | 265 | : o 0 a: -1 |
|
211 | 266 | :/ |
|
212 | 267 | @ -1 : -1 |
|
213 | 268 | |
|
214 | 269 | |
|
215 | 270 | $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("null+a+b+c+f"))}\n' -r 'null+a+b+c+f' |
|
216 | 271 | o 4 f: 2 |
|
217 | 272 | | |
|
218 | 273 | o 2 c: 1 |
|
219 | 274 | | |
|
220 | 275 | o 1 b: -1 |
|
221 | 276 | | |
|
222 | 277 | | o 0 a: -1 |
|
223 | 278 | |/ |
|
224 | 279 | @ -1 : -1 |
|
225 | 280 | |
|
226 | 281 | |
|
227 | 282 | wdir in subset: |
|
228 | 283 | |
|
229 | 284 | $ hg update -qC i |
|
230 | 285 | |
|
231 | 286 | $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("f+k+wdir()"))}\n' -r 'f+k+wdir()' |
|
232 | 287 | o 2147483647 : 4 |
|
233 | 288 | : |
|
234 | 289 | : o 9 k: |
|
235 | 290 | : |\ |
|
236 | 291 | : ~ ~ |
|
237 | 292 | o 4 f: |
|
238 | 293 | | |
|
239 | 294 | ~ |
|
240 | 295 | |
|
241 | 296 | $ hg update -qC null |
|
242 | 297 | |
|
243 | 298 | Revisions not in subset: |
|
244 | 299 | |
|
245 | 300 | $ hg log -T '{rev} {tags}: {subsetparents(rev, revset("a+c+f+g+j+l"))}\n' |
|
246 | 301 | 11 l tip: 4 8 7 |
|
247 | 302 | 10 i: |
|
248 | 303 | 9 k: |
|
249 | 304 | 8 g: 0 2 |
|
250 | 305 | 7 j: 0 2 |
|
251 | 306 | 6 h: |
|
252 | 307 | 5 e: |
|
253 | 308 | 4 f: 2 |
|
254 | 309 | 3 d: |
|
255 | 310 | 2 c: |
|
256 | 311 | 1 b: |
|
257 | 312 | 0 a: |
|
258 | 313 | |
|
259 | 314 | $ hg log -T '{rev} {tags}: {subsetparents(rev, revset("b+c"))}\n' |
|
260 | 315 | 11 l tip: |
|
261 | 316 | 10 i: |
|
262 | 317 | 9 k: |
|
263 | 318 | 8 g: |
|
264 | 319 | 7 j: |
|
265 | 320 | 6 h: |
|
266 | 321 | 5 e: |
|
267 | 322 | 4 f: |
|
268 | 323 | 3 d: |
|
269 | 324 | 2 c: 1 |
|
270 | 325 | 1 b: |
|
271 | 326 | 0 a: |
|
272 | 327 | |
|
273 | 328 | $ hg log -T '{rev} {tags}: {subsetparents(rev, revset("b+c"))}\n' -r'reverse(null:2)' |
|
274 | 329 | 2 c: 1 |
|
275 | 330 | 1 b: |
|
276 | 331 | 0 a: |
|
277 | 332 | -1 : |
|
278 | 333 | |
|
279 | 334 | Nothing excluded: |
|
280 | 335 | |
|
281 | 336 | $ hg log -T '{rev} {tags}: {subsetparents(rev, revset("null:wdir()"))}\n' -r'reverse(null:wdir())' |
|
282 | 337 | 2147483647 : -1 |
|
283 | 338 | 11 l tip: 10 9 |
|
284 | 339 | 10 i: 6 8 |
|
285 | 340 | 9 k: 5 7 |
|
286 | 341 | 8 g: 5 |
|
287 | 342 | 7 j: 3 |
|
288 | 343 | 6 h: 4 |
|
289 | 344 | 5 e: 3 |
|
290 | 345 | 4 f: 2 |
|
291 | 346 | 3 d: 0 2 |
|
292 | 347 | 2 c: 1 |
|
293 | 348 | 1 b: -1 |
|
294 | 349 | 0 a: -1 |
|
295 | 350 | -1 : -1 |
|
296 | 351 | |
|
297 | 352 | Uncachable query: |
|
298 | 353 | |
|
299 | 354 | $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("%d:%d", rev, rev - 1))}\n' |
|
300 | 355 | o 11 l tip: 10 |
|
301 | 356 | |\ |
|
302 | 357 | | o 10 i: |
|
303 | 358 | | |\ |
|
304 | 359 | o \ \ 9 k: |
|
305 | 360 | |\ \ \ |
|
306 | 361 | +-----o 8 g: |
|
307 | 362 | | | | |
|
308 | 363 | | o | 7 j: |
|
309 | 364 | | | | |
|
310 | 365 | | | o 6 h: |
|
311 | 366 | | | | |
|
312 | 367 | o | | 5 e: |
|
313 | 368 | |/ / |
|
314 | 369 | | o 4 f: |
|
315 | 370 | | | |
|
316 | 371 | o | 3 d: 2 |
|
317 | 372 | |\| |
|
318 | 373 | | o 2 c: 1 |
|
319 | 374 | | | |
|
320 | 375 | | o 1 b: |
|
321 | 376 | | |
|
322 | 377 | o 0 a: -1 |
|
323 | 378 | |
|
324 | 379 | |
|
325 | 380 | Invalid arguments: |
|
326 | 381 | |
|
327 | 382 | $ hg log -T '{subsetparents()}\n' |
|
328 | 383 | hg: parse error: subsetparents expects two arguments |
|
329 | 384 | [255] |
|
330 | 385 | $ hg log -T '{subsetparents("a")}\n' |
|
331 | 386 | hg: parse error: subsetparents expects two arguments |
|
332 | 387 | [255] |
|
333 | 388 | $ hg log -T '{subsetparents(rev, extras)}\n' |
|
334 | 389 | hg: parse error: subsetparents expects a queried revset |
|
335 | 390 | [255] |
|
336 | 391 | |
|
337 | 392 | $ cd .. |
|
393 | ||
|
394 | subsetparents: p1/p2 order | |
|
395 | ------------------------- | |
|
396 | ||
|
397 | $ cd named-branch-order | |
|
398 | ||
|
399 | Parents should be sorted in p1/p2 order since p1 is likely to belong to | |
|
400 | the same named branch: | |
|
401 | ||
|
402 | $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("0+1+2+6"))}\n' -r '0+1+2+6' | |
|
403 | o 6 : 2 1 0 | |
|
404 | :\ | |
|
405 | : \ | |
|
406 | : :\ | |
|
407 | : : o 2 : | |
|
408 | : : | |
|
409 | : o 1 : | |
|
410 | : | |
|
411 | o 0 : | |
|
412 | ||
|
413 | ||
|
414 | $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("0+1+2+6+10"))}\n' -r '0+1+2+6+10' | |
|
415 | @ 10 tip: 6 | |
|
416 | :\ | |
|
417 | : o 6 : 2 1 0 | |
|
418 | :/:\ | |
|
419 | : : o 2 : | |
|
420 | : : | |
|
421 | o : 1 : | |
|
422 | / | |
|
423 | o 0 : | |
|
424 | ||
|
425 | ||
|
426 | And p1 path should be selected if both p1/p2 paths exist: | |
|
427 | ||
|
428 | $ hg log -Gq -T '{rev} {tags}: {subsetparents(rev, revset("0+1+2+10"))}\n' -r '0+1+2+10' | |
|
429 | @ 10 tip: 1 2 0 | |
|
430 | :\ | |
|
431 | : \ | |
|
432 | : :\ | |
|
433 | : : o 2 : | |
|
434 | : : | |
|
435 | o : 1 : | |
|
436 | / | |
|
437 | o 0 : | |
|
438 | ||
|
439 | ||
|
440 | $ cd .. |