@@ -424,7 +424,7 b' class filefixupstate:'
 newfixups.append((fixuprev, a1, a2, b1, b2))
 elif a2 - a1 == b2 - b1 or b1 == b2:
 # 1:1 line mapping, or chunk was deleted
-for i in pycompat.xrange(a1, a2):
+for i in range(a1, a2):
 rev, linenum = annotated[i]
 if rev > 1:
 if b1 == b2: # deletion, simply remove that single line
@@ -451,7 +451,7 b' class filefixupstate:'
 """
 llog = linelog.linelog()
 a, alines = b'', []
-for i in pycompat.xrange(len(self.contents)):
+for i in range(len(self.contents)):
 b, blines = self.contents[i], self.contentlines[i]
 llrev = i * 2 + 1
 chunks = self._alldiffchunks(a, b, alines, blines)
@@ -463,7 +463,7 b' class filefixupstate:'
 def _checkoutlinelog(self):
 """() -> [str]. check out file contents from linelog"""
 contents = []
-for i in pycompat.xrange(len(self.contents)):
+for i in range(len(self.contents)):
 rev = (i + 1) * 2
 self.linelog.annotate(rev)
 content = b''.join(map(self._getline, self.linelog.annotateresult))
@@ -605,9 +605,9 b' class filefixupstate:'
 a1, a2, b1, b2 = chunk
 aidxs, bidxs = [0] * (a2 - a1), [0] * (b2 - b1)
 for idx, fa1, fa2, fb1, fb2 in fixups:
-for i in pycompat.xrange(fa1, fa2):
+for i in range(fa1, fa2):
 aidxs[i - a1] = (max(idx, 1) - 1) // 2
-for i in pycompat.xrange(fb1, fb2):
+for i in range(fb1, fb2):
 bidxs[i - b1] = (max(idx, 1) - 1) // 2

 fm.startitem()
@@ -637,7 +637,7 b' class filefixupstate:'
 )
 fm.data(path=self.path, linetype=linetype)

-for i in pycompat.xrange(a1, a2):
+for i in range(a1, a2):
 writeline(
 aidxs[i - a1],
 b'-',
@@ -645,7 +645,7 b' class filefixupstate:'
 b'deleted',
 b'diff.deleted',
 )
-for i in pycompat.xrange(b1, b2):
+for i in range(b1, b2):
 writeline(
 bidxs[i - b1],
 b'+',
@@ -219,7 +219,6 b' from mercurial import ('
 error,
 extensions,
 match,
-pycompat,
 registrar,
 util,
 )
@@ -452,7 +451,7 b' def _txnhook(ui, repo, hooktype, node, s'
 allow = buildmatch(ui, repo, user, b'acl.allow')
 deny = buildmatch(ui, repo, user, b'acl.deny')

-for rev in pycompat.xrange(repo[node].rev(), len(repo)):
+for rev in range(repo[node].rev(), len(repo)):
 ctx = repo[rev]
 branch = ctx.branch()
 if denybranches and denybranches(branch):
@@ -17,7 +17,6 b' from mercurial import ('
 encoding,
 extensions,
 graphmod,
-pycompat,
 templatekw,
 )

@@ -53,7 +52,7 b' def prettyedge(before, edge, after):'
 def convertedges(line):
 line = b' %s ' % line
 pretty = []
-for idx in pycompat.xrange(len(line) - 2):
+for idx in range(len(line) - 2):
 pretty.append(
 prettyedge(
 line[idx : idx + 1],
@@ -832,7 +832,7 b' def createchangeset(ui, log, fuzz=60, me'
 # branchpoints such that it is the latest possible
 # commit without any intervening, unrelated commits.

-for candidate in pycompat.xrange(i):
+for candidate in range(i):
 if c.branch not in changesets[candidate].branchpoints:
 if p is not None:
 break
@@ -309,7 +309,7 b' def _checkhook(ui, repo, node, headsonly'
 ensureenabled(ui)
 files = set()
 revs = set()
-for rev in pycompat.xrange(repo[node].rev(), len(repo)):
+for rev in range(repo[node].rev(), len(repo)):
 revs.add(rev)
 if headsonly:
 ctx = repo[rev]
@@ -75,7 +75,7 b' def _decorate(fctx):'
 linecount = text.count(b'\n')
 if text and not text.endswith(b'\n'):
 linecount += 1
-return ([(fctx, i) for i in pycompat.xrange(linecount)], text)
+return ([(fctx, i) for i in range(linecount)], text)


 # extracted from mercurial.context.basefilectx.annotate. slightly modified
@@ -577,7 +577,7 b' class _annotatecontext:'
 result = [None] * len(annotateresult)
 # {(rev, linenum): [lineindex]}
 key2idxs = collections.defaultdict(list)
-for i in pycompat.xrange(len(result)):
+for i in range(len(result)):
 key2idxs[(revs[i], annotateresult[i][1])].append(i)
 while key2idxs:
 # find an unresolved line and its linelog rev to annotate
@@ -93,7 +93,7 b' class defaultformatter:'

 # buffered output
 result = b''
-for i in pycompat.xrange(len(annotatedresult)):
+for i in range(len(annotatedresult)):
 for j, p in enumerate(pieces):
 sep = self.funcmap[j][1]
 padding = b' ' * (maxwidths[j] - len(p[i]))
@@ -148,7 +148,7 b' class jsonformatter(defaultformatter):'

 result = b''
 lasti = len(annotatedresult) - 1
-for i in pycompat.xrange(len(annotatedresult)):
+for i in range(len(annotatedresult)):
 result += b'\n {\n'
 for j, p in enumerate(pieces):
 k, vs = p
@@ -15,7 +15,6 b' from mercurial.node import hex'
 from mercurial.pycompat import open
 from mercurial import (
 error as hgerror,
-pycompat,
 )
 from . import error

@@ -165,13 +164,11 b' class revmap:'
 if self._lastmaxrev == -1: # write the entire file
 with open(self.path, b'wb') as f:
 f.write(self.HEADER)
-for i in pycompat.xrange(1, len(self._rev2hsh)):
+for i in range(1, len(self._rev2hsh)):
 self._writerev(i, f)
 else: # append incrementally
 with open(self.path, b'ab') as f:
-for i in pycompat.xrange(
-self._lastmaxrev + 1, len(self._rev2hsh)
-):
+for i in range(self._lastmaxrev + 1, len(self._rev2hsh)):
 self._writerev(i, f)
 self._lastmaxrev = self.maxrev

@@ -112,7 +112,7 b' class changelog(baselog):'
 return False

 def __iter__(self):
-return iter(pycompat.xrange(len(self)))
+return iter(range(len(self)))

 @property
 def filteredrevs(self):
@@ -186,7 +186,7 b' class changelog(baselog):'

 def shortest(self, node, minlength=1):
 nodehex = hex(node)
-for attempt in pycompat.xrange(minlength, len(nodehex) + 1):
+for attempt in range(minlength, len(nodehex) + 1):
 candidate = nodehex[:attempt]
 matches = int(
 self._db.execute(
@@ -245,7 +245,7 b' def revtree(ui, args, repo, full=b"tree"'
 else:
 i -= chunk

-for x in pycompat.xrange(chunk):
+for x in range(chunk):
 if i + x >= count:
 l[chunk - x :] = [0] * (chunk - x)
 break
@@ -256,7 +256,7 b' def revtree(ui, args, repo, full=b"tree"'
 else:
 if (i + x) in repo:
 l[x] = 1
-for x in pycompat.xrange(chunk - 1, -1, -1):
+for x in range(chunk - 1, -1, -1):
 if l[x] != 0:
 yield (i + x, full is not None and l[x] or None)
 if i == 0:
@@ -267,7 +267,7 b' def revtree(ui, args, repo, full=b"tree"'
 if len(ar) == 0:
 return 1
 mask = 0
-for i in pycompat.xrange(len(ar)):
+for i in range(len(ar)):
 if sha in reachable[i]:
 mask |= 1 << i

@@ -455,7 +455,7 b' class histeditstate:'
 rules = []
 rulelen = int(lines[index])
 index += 1
-for i in pycompat.xrange(rulelen):
+for i in range(rulelen):
 ruleaction = lines[index]
 index += 1
 rule = lines[index]
@@ -466,7 +466,7 b' class histeditstate:'
 replacements = []
 replacementlen = int(lines[index])
 index += 1
-for i in pycompat.xrange(replacementlen):
+for i in range(replacementlen):
 replacement = lines[index]
 original = bin(replacement[:40])
 succ = [
@@ -1574,7 +1574,7 b' pgup/K: move patch up, pgdn/J: move patc'

 start = min(old_rule_pos, new_rule_pos)
 end = max(old_rule_pos, new_rule_pos)
-for r in pycompat.xrange(start, end + 1):
+for r in range(start, end + 1):
 rules[new_rule_pos].checkconflicts(rules[r])
 rules[old_rule_pos].checkconflicts(rules[r])

@@ -461,7 +461,7 b' class patchheader:'
 the field and a blank line."""
 if self.message:
 subj = b'subject: ' + self.message[0].lower()
-for i in pycompat.xrange(len(self.comments)):
+for i in range(len(self.comments)):
 if subj == self.comments[i].lower():
 del self.comments[i]
 self.message = self.message[2:]
@@ -2040,7 +2040,7 b' class queue:'
 # if the patch excludes a modified file, mark that
 # file with mtime=0 so status can see it.
 mm = []
-for i in pycompat.xrange(len(m) - 1, -1, -1):
+for i in range(len(m) - 1, -1, -1):
 if not match1(m[i]):
 mm.append(m[i])
 del m[i]
@@ -2165,7 +2165,7 b' class queue:'
 else:
 start = self.series.index(patch) + 1
 unapplied = []
-for i in pycompat.xrange(start, len(self.series)):
+for i in range(start, len(self.series)):
 pushable, reason = self.pushable(i)
 if pushable:
 unapplied.append((i, self.series[i]))
@@ -2210,7 +2210,7 b' class queue:'
 if not missing:
 if self.ui.verbose:
 idxwidth = len(b"%d" % (start + length - 1))
-for i in pycompat.xrange(start, start + length):
+for i in range(start, start + length):
 patch = self.series[i]
 if patch in applied:
 char, state = b'A', b'applied'
@@ -2371,7 +2371,7 b' class queue:'
 def nextpatch(start):
 if all_patches or start >= len(self.series):
 return start
-for i in pycompat.xrange(start, len(self.series)):
+for i in range(start, len(self.series)):
 p, reason = self.pushable(i)
 if p:
 return i
@@ -3389,7 +3389,7 b' def guard(ui, repo, *args, **opts):'
 raise error.Abort(
 _(b'cannot mix -l/--list with options or arguments')
 )
-for i in pycompat.xrange(len(q.series)):
+for i in range(len(q.series)):
 status(i)
 return
 if not args or args[0][0:1] in b'-+':
@@ -3767,18 +3767,14 b' def select(ui, repo, *args, **opts):'
 pushable = lambda i: q.pushable(q.applied[i].name)[0]
 if args or opts.get(b'none'):
 old_unapplied = q.unapplied(repo)
-old_guarded = [
-i for i in pycompat.xrange(len(q.applied)) if not pushable(i)
-]
+old_guarded = [i for i in range(len(q.applied)) if not pushable(i)]
 q.setactive(args)
 q.savedirty()
 if not args:
 ui.status(_(b'guards deactivated\n'))
 if not opts.get(b'pop') and not opts.get(b'reapply'):
 unapplied = q.unapplied(repo)
-guarded = [
-i for i in pycompat.xrange(len(q.applied)) if not pushable(i)
-]
+guarded = [i for i in range(len(q.applied)) if not pushable(i)]
 if len(unapplied) != len(old_unapplied):
 ui.status(
 _(
@@ -3825,7 +3821,7 b' def select(ui, repo, *args, **opts):'
 reapply = opts.get(b'reapply') and q.applied and q.applied[-1].name
 popped = False
 if opts.get(b'pop') or opts.get(b'reapply'):
-for i in pycompat.xrange(len(q.applied)):
+for i in range(len(q.applied)):
 if not pushable(i):
 ui.status(_(b'popping guarded patches\n'))
 popped = True
@@ -318,7 +318,7 b' class basepack(versionmixin):'
 params = self.params
 rawfanout = self._index[FANOUTSTART : FANOUTSTART + params.fanoutsize]
 fanouttable = []
-for i in pycompat.xrange(0, params.fanoutcount):
+for i in range(0, params.fanoutcount):
 loc = i * 4
 fanoutentry = struct.unpack(b'!I', rawfanout[loc : loc + 4])[0]
 fanouttable.append(fanoutentry)
@@ -171,7 +171,7 b' class basestore:'

 # Scan the changelog until we've found every file name
 cl = self.repo.unfiltered().changelog
-for rev in pycompat.xrange(len(cl) - 1, -1, -1):
+for rev in range(len(cl) - 1, -1, -1):
 if not missingfilename:
 break
 files = cl.readfiles(cl.node(rev))
@@ -7,7 +7,6 b' from mercurial.node import ('
 from mercurial.pycompat import getattr
 from mercurial import (
 mdiff,
-pycompat,
 revlog,
 )
 from . import (
@@ -366,7 +365,7 b' class manifestrevlogstore:'
 rl = revlog.revlog(self._svfs, radix=b'00manifesttree')
 startlinkrev = self._repackstartlinkrev
 endlinkrev = self._repackendlinkrev
-for rev in pycompat.xrange(len(rl) - 1, -1, -1):
+for rev in range(len(rl) - 1, -1, -1):
 linkrev = rl.linkrev(rev)
 if linkrev < startlinkrev:
 break
@@ -383,7 +382,7 b' class manifestrevlogstore:'
 treename = path[5 : -len(b'/00manifest')]

 rl = revlog.revlog(self._svfs, indexfile=path[:-2])
-for rev in pycompat.xrange(len(rl) - 1, -1, -1):
+for rev in range(len(rl) - 1, -1, -1):
 linkrev = rl.linkrev(rev)
 if linkrev < startlinkrev:
 break
@@ -7,7 +7,6 b' from mercurial.node import ('
 )
 from mercurial.i18n import _
 from mercurial import (
-pycompat,
 util,
 )
 from . import (
@@ -232,7 +231,7 b' class datapack(basepack.basepack):'

 # Scan forward to find the first non-same entry, which is the upper
 # bound.
-for i in pycompat.xrange(fanoutkey + 1, params.fanoutcount):
+for i in range(fanoutkey + 1, params.fanoutcount):
 end = fanout[i] + params.indexstart
 if end != start:
 break
@@ -5,7 +5,6 b' from mercurial.node import ('
 sha1nodeconstants,
 )
 from mercurial import (
-pycompat,
 util,
 )
 from mercurial.utils import hashutil
@@ -207,7 +206,7 b' class historypack(basepack.basepack):'
 start = fanout[fanoutkey] + params.indexstart
 indexend = self._indexend

-for i in pycompat.xrange(fanoutkey + 1, params.fanoutcount):
+for i in range(fanoutkey + 1, params.fanoutcount):
 end = fanout[i] + params.indexstart
 if end != start:
 break
@@ -323,7 +322,7 b' class historypack(basepack.basepack):'
 )[0]
 offset += ENTRYCOUNTSIZE

-for i in pycompat.xrange(revcount):
+for i in range(revcount):
 entry = struct.unpack(
 PACKFORMAT, data[offset : offset + PACKENTRYLENGTH]
 )
@@ -9,7 +9,6 b' from mercurial import ('
 lock as lockmod,
 mdiff,
 policy,
-pycompat,
 scmutil,
 util,
 vfs,
@@ -347,7 +346,7 b' def _computeincrementalpack(files, opts)'

 # Group the packs by generation (i.e. by size)
 generations = []
-for i in pycompat.xrange(len(limits)):
+for i in range(len(limits)):
 generations.append([])

 sizes = {}
@@ -13,7 +13,6 b' from mercurial import ('
 error,
 match,
 mdiff,
-pycompat,
 )
 from . import (
 constants,
@@ -43,7 +42,7 b' def shallowgroup(cls, self, nodelist, rl'
 nodelist.insert(0, p)

 # build deltas
-for i in pycompat.xrange(len(nodelist) - 1):
+for i in range(len(nodelist) - 1):
 prev, curr = nodelist[i], nodelist[i + 1]
 linknode = lookup(curr)
 for c in self.nodechunk(rlog, curr, prev, linknode):
@@ -454,14 +454,14 b' def readpath(stream):'
 def readnodelist(stream):
 rawlen = readexactly(stream, constants.NODECOUNTSIZE)
 nodecount = struct.unpack(constants.NODECOUNTSTRUCT, rawlen)[0]
-for i in pycompat.xrange(nodecount):
+for i in range(nodecount):
 yield readexactly(stream, constants.NODESIZE)


 def readpathlist(stream):
 rawlen = readexactly(stream, constants.PATHCOUNTSIZE)
 pathcount = struct.unpack(constants.PATHCOUNTSTRUCT, rawlen)[0]
-for i in pycompat.xrange(pathcount):
+for i in range(pathcount):
 yield readpath(stream)

@@ -396,7 +396,7 b' class sqlitefilestore:'
 return len(self._revisions)

 def __iter__(self):
-return iter(pycompat.xrange(len(self._revisions)))
+return iter(range(len(self._revisions)))

 def hasnode(self, node):
 if node == sha1nodeconstants.nullid:
@@ -48,7 +48,6 b' from mercurial.node import short'
 from mercurial import (
 cmdutil,
 extensions,
-pycompat,
 registrar,
 )
 from mercurial.utils import stringutil
@@ -156,9 +155,7 b' def forbidnewline(ui, repo, hooktype, no'
 # changegroup that contains an unacceptable commit followed later
 # by a commit that fixes the problem.
 tip = repo[b'tip']
-for rev in pycompat.xrange(
-repo.changelog.tiprev(), repo[node].rev() - 1, -1
-):
+for rev in range(repo.changelog.tiprev(), repo[node].rev() - 1, -1):
 c = repo[rev]
 for f in c.files():
 if f in seen or f not in tip or f not in c:
@@ -12,7 +12,6 b' from .node import nullrev'
 from . import (
 dagop,
 policy,
-pycompat,
 )

 parsers = policy.importmod('parsers')
@@ -187,7 +186,7 b' class incrementalmissingancestors:'
 # no revs to consider
 return

-for curr in pycompat.xrange(start, min(revs) - 1, -1):
+for curr in range(start, min(revs) - 1, -1):
 if curr not in bases:
 continue
 revs.discard(curr)
@@ -228,7 +227,7 b' class incrementalmissingancestors:'
 # exit.

 missing = []
-for curr in pycompat.xrange(start, nullrev, -1):
+for curr in range(start, nullrev, -1):
 if not revsvisit:
 break

@@ -420,7 +420,7 b' class cg1unpacker:'
 cl = repo.changelog
 ml = repo.manifestlog
 # validate incoming csets have their manifests
-for cset in pycompat.xrange(clstart, clend):
+for cset in range(clstart, clend):
 mfnode = cl.changelogrevision(cset).manifest
 mfest = ml[mfnode].readdelta()
 # store file nodes we must see
@@ -509,7 +509,7 b' class cg1unpacker:'
 **pycompat.strkwargs(hookargs)
 )

-added = pycompat.xrange(clstart, clend)
+added = range(clstart, clend)
 phaseall = None
 if srctype in (b'push', b'serve'):
 # Old servers can not push the boundary themselves.
@@ -825,7 +825,7 b' def _resolvenarrowrevisioninfo('
 # somewhat unsurprised to find a case in the wild
 # where this breaks down a bit. That said, I don't
 # know if it would hurt anything.
-for i in pycompat.xrange(rev, 0, -1):
+for i in range(rev, 0, -1):
 if store.linkrev(i) == clrev:
 return i
 # We failed to resolve a parent for this node, so
@@ -1956,7 +1956,7 b' def _addchangegroupfiles('
 revisions += len(fl) - o
 if f in needfiles:
 needs = needfiles[f]
-for new in pycompat.xrange(o, len(fl)):
+for new in range(o, len(fl)):
 n = fl.node(new)
 if n in needs:
 needs.remove(n)
@@ -32,7 +32,6 b' from . import ('
 patch,
 pathutil,
 phases,
-pycompat,
 repoview,
 scmutil,
 sparse,
@@ -2431,7 +2430,7 b' class overlayworkingctx(committablectx):'
 # Test that each new directory to be created to write this path from p2
 # is not a file in p1.
 components = path.split(b'/')
-for i in pycompat.xrange(len(components)):
+for i in range(len(components)):
 component = b"/".join(components[0:i])
 if component in self:
 fail(path, component)
@@ -203,7 +203,7 b' def _genrevdescendants(repo, revs, follo'
 def _builddescendantsmap(repo, startrev, followfirst):
 """Build map of 'rev -> child revs', offset from startrev"""
 cl = repo.changelog
-descmap = [[] for _rev in pycompat.xrange(startrev, len(cl))]
+descmap = [[] for _rev in range(startrev, len(cl))]
 for currev in cl.revs(startrev + 1):
 p1rev, p2rev = cl.parentrevs(currev)
 if p1rev >= startrev:
@@ -725,7 +725,7 b' def _annotatepair(parents, childfctx, ch'
 for idx, (parent, blocks) in enumerate(pblocks):
 for (a1, a2, b1, b2), _t in blocks:
 if a2 - a1 >= b2 - b1:
-for bk in pycompat.xrange(b1, b2):
+for bk in range(b1, b2):
 if child.fctxs[bk] == childfctx:
 ak = min(a1 + (bk - b1), a2 - 1)
 child.fctxs[bk] = parent.fctxs[ak]
@@ -738,7 +738,7 b' def _annotatepair(parents, childfctx, ch'
 # line.
 for parent, blocks in remaining:
 for a1, a2, b1, b2 in blocks:
-for bk in pycompat.xrange(b1, b2):
+for bk in range(b1, b2):
 if child.fctxs[bk] == childfctx:
 ak = min(a1 + (bk - b1), a2 - 1)
 child.fctxs[bk] = parent.fctxs[ak]
@@ -228,7 +228,7 b' def parsedag(desc):'
 c, digs = nextrun(nextch(), pycompat.bytestr(string.digits))
 # pytype: enable=wrong-arg-types
 n = int(digs)
-for i in pycompat.xrange(0, n):
+for i in range(0, n):
 yield b'n', (r, [p1])
 p1 = r
 r += 1
@@ -248,9 +248,7 b' def debugbuilddag('
 if mergeable_file:
 linesperrev = 2
 # make a file with k lines per rev
-initialmergedlines = [
-b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
-]
+initialmergedlines = [b'%d' % i for i in range(0, total * linesperrev)]
 initialmergedlines.append(b"")

 tags = []
@@ -3193,7 +3191,7 b' def debugrevlog(ui, repo, file_=None, **'
 ts = 0
 heads = set()

-for rev in pycompat.xrange(numrevs):
+for rev in range(numrevs):
 dbase = r.deltaparent(rev)
 if dbase == -1:
 dbase = rev
@@ -3291,7 +3289,7 b' def debugrevlog(ui, repo, file_=None, **'
 l[2] += size

 numrevs = len(r)
-for rev in pycompat.xrange(numrevs):
+for rev in range(numrevs):
 p1, p2 = r.parentrevs(rev)
 delta = r.deltaparent(rev)
 if format > 0:
@@ -10,7 +10,6 b' from .i18n import _'

 from . import (
 error,
-pycompat,
 )

 MISSING_NEWLINE_MARKER = b'\\ No newline at end of file\n'
@@ -29,7 +28,7 b' def addlines(fp, hunk, lena, lenb, a, b)'
 num = max(todoa, todob)
 if num == 0:
 break
-for i in pycompat.xrange(num):
+for i in range(num):
 s = fp.readline()
 if not s:
 raise error.ParseError(_(b'incomplete hunk'))
@@ -76,7 +75,7 b' def testhunk(a, b, bstart):'
 blen = len(b)
 if alen > blen - bstart or bstart < 0:
 return False
-for i in pycompat.xrange(alen):
+for i in range(alen):
 if a[i][1:] != b[i + bstart]:
 return False
 return True
@@ -401,7 +401,7 b' def getcols(s, start, c):'
 # type: (bytes, int, int) -> bytes
 """Use colwidth to find a c-column substring of s starting at byte
 index start"""
-for x in pycompat.xrange(start + c, len(s)):
+for x in range(start + c, len(s)):
 t = s[start:x]
 if colwidth(t) == c:
 return t
@@ -22,7 +22,6 b' from .node import nullrev'
 from .thirdparty import attr
 from . import (
 dagop,
-pycompat,
 smartset,
 util,
 )
@@ -463,16 +462,16 b' def ascii(ui, state, type, char, text, c'
 # shift_interline is the line containing the non-vertical
 # edges between this entry and the next
 shift_interline = echars[: idx * 2]
-for i in pycompat.xrange(2 + coldiff):
+for i in range(2 + coldiff):
 shift_interline.append(b' ')
 count = ncols - idx - 1
 if coldiff == -1:
-for i in pycompat.xrange(count):
+for i in range(count):
 shift_interline.extend([b'/', b' '])
 elif coldiff == 0:
 shift_interline.extend(echars[(idx + 1) * 2 : ncols * 2])
 else:
-for i in pycompat.xrange(count):
+for i in range(count):
 shift_interline.extend([b'\\', b' '])

 # draw edges from the current node to its parents
@@ -67,15 +67,15 b' def difflinestates(a, b):'
 sm = difflib.SequenceMatcher(None, a, b)
 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
 if tag == 'insert':
-for i in pycompat.xrange(blo, bhi):
+for i in range(blo, bhi):
 yield (b'+', b[i])
 elif tag == 'delete':
-for i in pycompat.xrange(alo, ahi):
+for i in range(alo, ahi):
 yield (b'-', a[i])
 elif tag == 'replace':
-for i in pycompat.xrange(alo, ahi):
+for i in range(alo, ahi):
 yield (b'-', a[i])
-for i in pycompat.xrange(blo, bhi):
+for i in range(blo, bhi):
 yield (b'+', b[i])

@@ -228,7 +228,7 b' def _search(web):'

 def revgen():
 cl = web.repo.changelog
-for i in pycompat.xrange(len(web.repo) - 1, 0, -100):
+for i in range(len(web.repo) - 1, 0, -100):
 l = []
 for j in cl.revs(max(0, i - 99), i):
 ctx = web.repo[j]
@@ -720,7 +720,7 b' def _getcompblockgen(context, leftlines,'
 len1 = lhi - llo
 len2 = rhi - rlo
 count = min(len1, len2)
-for i in pycompat.xrange(count):
+for i in range(count):
 yield _compline(
 type=type,
 leftlineno=llo + i + 1,
@@ -729,7 +729,7 b' def _getcompblockgen(context, leftlines,'
 rightline=rightlines[rlo + i],
 )
 if len1 > len2:
-for i in pycompat.xrange(llo + count, lhi):
+for i in range(llo + count, lhi):
 yield _compline(
 type=type,
 leftlineno=i + 1,
@@ -738,7 +738,7 b' def _getcompblockgen(context, leftlines,'
 rightline=None,
 )
 elif len2 > len1:
-for i in pycompat.xrange(rlo + count, rhi):
+for i in range(rlo + count, rhi):
 yield _compline(
 type=type,
 leftlineno=None,
@@ -55,7 +55,7 b' def encodevalueinheaders(value, header, '
 result = []

 n = 0
-for i in pycompat.xrange(0, len(value), valuelen):
+for i in range(0, len(value), valuelen):
 n += 1
 result.append((fmt % str(n), pycompat.strurl(value[i : i + valuelen])))

@@ -293,7 +293,7 b' class linelog:'
 % (expected, numentries)
 )
 instructions = [_eof(0, 0)]
-for offset in pycompat.xrange(1, numentries):
+for offset in range(1, numentries):
 instructions.append(_decodeone(buf, offset * _llentry.size))
 return cls(instructions, maxrev=maxrev)

@@ -349,7 +349,7 b' class linelog:'
 tgt = oldproglen + (b2 - b1 + 1)
 # Jump to skip the insert if we're at an older revision.
 appendinst(_jl(rev, tgt))
-for linenum in pycompat.xrange(b1, b2):
+for linenum in range(b1, b2):
 if _internal_blines is None:
 bappend(lineinfo(rev, linenum, programlen()))
 appendinst(_line(rev, linenum))
@@ -447,7 +447,7 b' class linelog:'
 # only take as many steps as there are instructions in the
 # program - if we don't find an EOF or our stop-line before
 # then, something is badly broken.
-for step in pycompat.xrange(len(self._program)):
+for step in range(len(self._program)):
 inst = self._program[pc]
 nextpc = pc + 1
 if isinstance(inst, _jump):
@@ -1885,7 +1885,7 b' class localrepository:'
 # wdirrev isn't contiguous so the slice shouldn't include it
 return [
 self[i]
-for i in pycompat.xrange(*changeid.indices(len(self)))
+for i in range(*changeid.indices(len(self)))
 if i not in self.changelog.filteredrevs
 ]

@@ -11,7 +11,6 b' import errno'

 from . import (
 encoding,
-pycompat,
 )

 from .utils import (
@@ -54,7 +53,7 b' def openlogfile(ui, vfs, name, maxfiles='
 else:
 if st.st_size >= maxsize:
 path = vfs.join(name)
-for i in pycompat.xrange(maxfiles - 1, 1, -1):
+for i in range(maxfiles - 1, 1, -1):
 rotate(
 oldpath=b'%s.%d' % (path, i - 1),
 newpath=b'%s.%d' % (path, i),
@@ -378,7 +378,7 b' def _unidiff(t1, t2, opts=defaultopts):'
 # walk backwards from the start of the context up to the start of
 # the previous hunk context until we find a line starting with an
 # alphanumeric char.
-for i in pycompat.xrange(astart - 1, lastpos - 1, -1):
+for i in range(astart - 1, lastpos - 1, -1):
 if l1[i][0:1].isalnum():
 func = b' ' + l1[i].rstrip()
 # split long function name if ASCII. otherwise we have no
@@ -402,7 +402,7 b' def _unidiff(t1, t2, opts=defaultopts):'
 hunklines = (
 [b"@@ -%d,%d +%d,%d @@%s\n" % (hunkrange + (func,))]
 + delta
-+ [b' ' + l1[x] for x in pycompat.xrange(a2, aend)]
++ [b' ' + l1[x] for x in range(a2, aend)]
 )
 # If either file ends without a newline and the last line of
 # that file is part of a hunk, a marker is printed. If the
@@ -411,7 +411,7 b' def _unidiff(t1, t2, opts=defaultopts):'
 # which the hunk can end in a shared line without a newline.
 skip = False
 if not t1.endswith(b'\n') and astart + alen == len(l1) + 1:
-for i in pycompat.xrange(len(hunklines) - 1, -1, -1):
+for i in range(len(hunklines) - 1, -1, -1):
 if hunklines[i].startswith((b'-', b' ')):
 if hunklines[i].startswith(b' '):
 skip = True
@@ -419,7 +419,7 b' def _unidiff(t1, t2, opts=defaultopts):'
 hunklines.insert(i + 1, diffhelper.MISSING_NEWLINE_MARKER)
 break
 if not skip and not t2.endswith(b'\n') and bstart + blen == len(l2) + 1:
-for i in pycompat.xrange(len(hunklines) - 1, -1, -1):
+for i in range(len(hunklines) - 1, -1, -1):
 if hunklines[i].startswith(b'+'):
 hunklines[i] += b'\n'
 hunklines.insert(i + 1, diffhelper.MISSING_NEWLINE_MARKER)
@@ -349,7 +349,7 b' def findtables(blocks):'
 # position in bytes
 columns = [
 x
-for x in pycompat.xrange(len(div))
+for x in range(len(div))
 if div[x : x + 1] == b'=' and (x == 0 or div[x - 1 : x] == b' ')
 ]
 rows = []
@@ -769,7 +769,7 b' def filtersections(blocks, section):'
 if llen and llen != plen:
 collapse = False
 s = []
-for j in pycompat.xrange(3, plen - 1):
+for j in range(3, plen - 1):
 parent = parents[j]
 if j >= llen or lastparents[j] != parent:
 s.append(len(blocks))
@@ -397,7 +397,7 b' def _fm1purereadmarkers(data, off, stop)'
 off = o3 + metasize * nummeta
 metapairsize = unpack(b'>' + (metafmt * nummeta), data[o3:off])
 metadata = []
-for idx in pycompat.xrange(0, len(metapairsize), 2):
+for idx in range(0, len(metapairsize), 2):
 o1 = off + metapairsize[idx]
 o2 = o1 + metapairsize[idx + 1]
 metadata.append((data[off:o1], data[o1:o2]))
@@ -864,9 +864,7 b' class patchfile:'
 for x, s in enumerate(self.lines):
 self.hash.setdefault(s, []).append(x)

-for fuzzlen in pycompat.xrange(
-self.ui.configint(b"patch", b"fuzz") + 1
-):
+for fuzzlen in range(self.ui.configint(b"patch", b"fuzz") + 1):
 for toponly in [True, False]:
 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
 oldstart = oldstart + self.offset + self.skew
@@ -1431,7 +1429,7 b' class hunk:'
 self.lena = int(aend) - self.starta
 if self.starta:
 self.lena += 1
-for x in pycompat.xrange(self.lena):
+for x in range(self.lena):
 l = lr.readline()
 if l.startswith(b'---'):
 # lines addition, old block is empty
@@ -1466,7 +1464,7 b' class hunk:'
 if self.startb:
 self.lenb += 1
 hunki = 1
-for x in pycompat.xrange(self.lenb):
+for x in range(self.lenb):
 l = lr.readline()
 if l.startswith(br'\ '):
 # XXX: the only way to hit this is with an invalid line range.
@@ -1547,14 +1545,14 b' class hunk:'
 top = 0
 bot = 0
 hlen = len(self.hunk)
-for x in pycompat.xrange(hlen - 1):
+for x in range(hlen - 1):
 # the hunk starts with the @@ line, so use x+1
 if self.hunk[x + 1].startswith(b' '):
 top += 1
 else:
 break
 if not toponly:
-for x in pycompat.xrange(hlen - 1):
+for x in range(hlen - 1):
 if self.hunk[hlen - bot - 1].startswith(b' '):
 bot += 1
 else:
@@ -255,14 +255,14 b' def _sortedrange_insert(data, idx, rev, '
 merge_after = r2[0] == rev + 1 and t2 == t

 if merge_before and merge_after:
-data[idx - 1] = (pycompat.xrange(r1[0], r2[-1] + 1), t)
+data[idx - 1] = (range(r1[0], r2[-1] + 1), t)
 data.pop(idx)
 elif merge_before:
-data[idx - 1] = (pycompat.xrange(r1[0], rev + 1), t)
+data[idx - 1] = (range(r1[0], rev + 1), t)
 elif merge_after:
-data[idx] = (pycompat.xrange(rev, r2[-1] + 1), t)
+data[idx] = (range(rev, r2[-1] + 1), t)
 else:
-data.insert(idx, (pycompat.xrange(rev, rev + 1), t))
+data.insert(idx, (range(rev, rev + 1), t))


 def _sortedrange_split(data, idx, rev, t):
@@ -274,16 +274,16 b' def _sortedrange_split(data, idx, rev, t'
 data.pop(idx)
 _sortedrange_insert(data, idx, rev, t)
 elif r1[0] == rev:
-data[idx] = (pycompat.xrange(rev + 1, r1[-1] + 1), t1)
+data[idx] = (range(rev + 1, r1[-1] + 1), t1)
 _sortedrange_insert(data, idx, rev, t)
 elif r1[-1] == rev:
-data[idx] = (pycompat.xrange(r1[0], rev), t1)
+data[idx] = (range(r1[0], rev), t1)
 _sortedrange_insert(data, idx + 1, rev, t)
 else:
 data[idx : idx + 1] = [
-(pycompat.xrange(r1[0], rev), t1),
-(pycompat.xrange(rev, rev + 1), t),
-(pycompat.xrange(rev + 1, r1[-1] + 1), t1),
+(range(r1[0], rev), t1),
+(range(rev, rev + 1), t),
+(range(rev + 1, r1[-1] + 1), t1),
 ]


@@ -297,7 +297,7 b' def _trackphasechange(data, rev, old, ne'

 # If data is empty, create a one-revision range and done
 if not data:
-data.insert(0, (pycompat.xrange(rev, rev + 1), (old, new)))
+data.insert(0, (range(rev, rev + 1), (old, new)))
 return

 low = 0
@@ -333,14 +333,14 b' def _trackphasechange(data, rev, old, ne'
 low = mid + 1

 if low == len(data):
-data.append((pycompat.xrange(rev, rev + 1), t))
+data.append((range(rev, rev + 1), t))
 return

 r1, t1 = data[low]
 if r1[0] > rev:
-data.insert(low, (pycompat.xrange(rev, rev + 1), t))
+data.insert(low, (range(rev, rev + 1), t))
 else:
-data.insert(low + 1, (pycompat.xrange(rev, rev + 1), t))
+data.insert(low + 1, (range(rev, rev + 1), t))


 class phasecache:
@@ -629,7 +629,7 b' class phasecache:'
 affected = set(repo.revs(b'(%ln::) - (%ln::)', new, old))

 # find the phase of the affected revision
-for phase in pycompat.xrange(targetphase, -1, -1):
+for phase in range(targetphase, -1, -1):
 if phase:
 roots = oldroots.get(phase, [])
 revs = set(repo.revs(b'%ln::%ld', roots, affected))
@@ -75,7 +75,7 @@ def _bin(bs):
 def _str(v, l):
     # type: (int, int) -> bytes
     bs = b""
-    for p in pycompat.xrange(l):
+    for p in range(l):
         bs = pycompat.bytechr(v & 255) + bs
         v >>= 8
     return bs
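For reference, the loop above builds a fixed-width, big-endian byte string from an integer by repeatedly prepending the low byte. A standalone sketch of the same idea (hypothetical helper name, using plain bytes() instead of pycompat.bytechr):

    def int_to_bytes(v, width):
        # Prepend the low byte on each pass, which yields a big-endian,
        # fixed-width result -- the same shape as the loop above.
        bs = b""
        for _ in range(width):
            bs = bytes([v & 255]) + bs
            v >>= 8
        return bs

    assert int_to_bytes(0x1234, 4) == b"\x00\x00\x124"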
@@ -99,7 +99,7 @@ def _hweight(x):
     return c


-_htab = [_hweight(x) for x in pycompat.xrange(256)]
+_htab = [_hweight(x) for x in range(256)]


 def _hamming(a, b):
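_htab is a 256-entry lookup table of per-byte values built once at import time; only its construction changes here. A generic sketch of the same pattern, assuming the per-byte function is a population count (the body of _hweight is not shown in this hunk, so the helper names below are illustrative):

    # Precompute a table of bit counts for every possible byte value, so
    # per-byte table lookups replace per-bit loops on the hot path.
    popcount_table = [bin(x).count("1") for x in range(256)]

    def hamming_bytes(a, b):
        # Hamming distance between two equal-length byte strings.
        return sum(popcount_table[x ^ y] for x, y in zip(a, b))

    assert hamming_bytes(b"\x00\xff", b"\x0f\xff") == 4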
@@ -164,7 +164,7 @@ def ctxpvec(ctx):
     pvc = r._pveccache
     if ctx.rev() not in pvc:
         cl = r.changelog
-        for n in pycompat.xrange(ctx.rev() + 1):
+        for n in range(ctx.rev() + 1):
             if n not in pvc:
                 node = cl.node(n)
                 p1, p2 = cl.parentrevs(n)
@@ -24,7 +24,6 @@ from . import (
     obsutil,
     pathutil,
     phases,
-    pycompat,
     requirements,
     scmutil,
     util,
@@ -91,7 +90,7 @@ def _collectfiles(repo, striprev):
     """find out the filelogs affected by the strip"""
     files = set()

-    for x in pycompat.xrange(striprev, len(repo)):
+    for x in range(striprev, len(repo)):
         files.update(repo[x].files())

     return sorted(files)
@@ -168,7 +168,7 @@ def computeimpactable(repo, visibilityex
             firstmutable = min(firstmutable, min(cl.rev(r) for r in roots))
     # protect from nullrev root
     firstmutable = max(0, firstmutable)
-    return frozenset(pycompat.xrange(firstmutable, len(cl)))
+    return frozenset(range(firstmutable, len(cl)))


 # function to compute filtered set
@@ -264,7 +264,7 @@ def wrapchangelog(unfichangelog, filtere
     class filteredchangelogmixin:
         def tiprev(self):
             """filtered version of revlog.tiprev"""
-            for i in pycompat.xrange(len(self) - 1, -2, -1):
+            for i in range(len(self) - 1, -2, -1):
                 if i not in self.filteredrevs:
                     return i

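tiprev walks from the highest revision down and stops at the first one that is not filtered; the -2 stop lets the loop fall through to -1 (the null revision) when everything is filtered out. A standalone sketch of that scan, with hypothetical names:

    def visible_tip(total, filteredrevs):
        # Scan from the top down; range(total - 1, -2, -1) ends at -1, so
        # -1 is returned when every real revision is filtered.
        for i in range(total - 1, -2, -1):
            if i not in filteredrevs:
                return i

    assert visible_tip(5, {3, 4}) == 2
    assert visible_tip(2, {0, 1}) == -1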
@@ -276,7 +276,7 @@ class filteredchangelogmixin:
             """filtered version of revlog.__iter__"""

             def filterediter():
-                for i in pycompat.xrange(len(self)):
+                for i in range(len(self)):
                     if i not in self.filteredrevs:
                         yield i

@@ -743,7 +743,7 @@ class revlog:
         return len(self.index)

     def __iter__(self):
-        return iter(pycompat.xrange(len(self)))
+        return iter(range(len(self)))

     def revs(self, start=0, stop=None):
         """iterate over all rev in this revlog (from start to stop)"""
@@ -15,7 +15,6 @@ from ..i18n import _
 from .. import (
     error,
     node,
-    pycompat,
     revlogutils,
     util,
 )
@@ -77,7 +76,7 @@ class revlogoldindex(list):
     def __delitem__(self, i):
         if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
             raise ValueError(b"deleting slices only supports a:-1 with step 1")
-        for r in pycompat.xrange(i.start, len(self)):
+        for r in range(i.start, len(self)):
             del self._nodemap[self[r][7]]
         super(revlogoldindex, self).__delitem__(i)

@@ -75,7 +75,7 @@ symbols = {}
         + pycompat.sysbytes(string.digits)
         + b'._@'
     )
-) | set(map(pycompat.bytechr, pycompat.xrange(128, 256)))
+) | set(map(pycompat.bytechr, range(128, 256)))

 # default set of valid characters for non-initial letters of symbols
 _symletters = _syminitletters | set(pycompat.iterbytestr(b'-/'))
@@ -800,7 +800,7 @@ def walkchangerevs(repo, revs, makefilem
     stopiteration = False
     for windowsize in increasingwindows():
         nrevs = []
-        for i in pycompat.xrange(windowsize):
+        for i in range(windowsize):
             rev = next(it, None)
             if rev is None:
                 stopiteration = True
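The window loop above pulls at most windowsize revisions from the iterator per pass, using next(it, None) so an exhausted source simply ends the window early. A standalone sketch of that consumption pattern (hypothetical names, assuming items are never None, as with revision numbers):

    def take_window(it, windowsize):
        # Collect up to windowsize items; report whether the source ran out.
        window = []
        exhausted = False
        for _ in range(windowsize):
            item = next(it, None)
            if item is None:
                exhausted = True
                break
            window.append(item)
        return window, exhausted

    it = iter([1, 2, 3])
    assert take_window(it, 2) == ([1, 2], False)
    assert take_window(it, 2) == ([3], True)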
@@ -92,7 +92,7 @@ def runservice(
             runargs.append(b'--daemon-postexec=unlink:%s' % lockpath)
             # Don't pass --cwd to the child process, because we've already
             # changed directory.
-            for i in pycompat.xrange(1, len(runargs)):
+            for i in range(1, len(runargs)):
                 if runargs[i].startswith(b'--cwd='):
                     del runargs[i]
                     break
@@ -1010,8 +1010,7 @@ def _rebaserestoredcommit(
             tr.close()

             nodestoremove = [
-                repo.changelog.node(rev)
-                for rev in pycompat.xrange(oldtiprev, len(repo))
+                repo.changelog.node(rev) for rev in range(oldtiprev, len(repo))
             ]
             shelvedstate.save(
                 repo,
@@ -21,7 +21,6 @@ from .i18n import _
 from . import (
     error,
     mdiff,
-    pycompat,
 )
 from .utils import stringutil

@@ -53,9 +52,7 @@ def compare_range(a, astart, aend, b, bs
     """Compare a[astart:aend] == b[bstart:bend], without slicing."""
     if (aend - astart) != (bend - bstart):
         return False
-    for ia, ib in zip(
-        pycompat.xrange(astart, aend), pycompat.xrange(bstart, bend)
-    ):
+    for ia, ib in zip(range(astart, aend), range(bstart, bend)):
         if a[ia] != b[ib]:
             return False
     else:
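compare_range checks whether two sub-sequences are equal without building slices; the rewritten line zips two range objects so each side is compared index by index. A standalone sketch of the same technique, with hypothetical names:

    def equal_regions(a, astart, aend, b, bstart, bend):
        # Compare a[astart:aend] with b[bstart:bend] element by element,
        # avoiding the temporary objects that slicing would create.
        if (aend - astart) != (bend - bstart):
            return False
        for ia, ib in zip(range(astart, aend), range(bstart, bend)):
            if a[ia] != b[ib]:
                return False
        return True

    assert equal_regions(b"abcdef", 1, 4, b"xbcdy", 1, 4)
    assert not equal_regions(b"abcdef", 0, 3, b"abX", 0, 3)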
@@ -152,11 +152,11 @@ class abstractsmartset:
         # but start > stop is allowed, which should be an empty set.
         ys = []
         it = iter(self)
-        for x in pycompat.xrange(start):
+        for x in range(start):
             y = next(it, None)
             if y is None:
                 break
-        for x in pycompat.xrange(stop - start):
+        for x in range(stop - start):
             y = next(it, None)
             if y is None:
                 break
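The slicing logic above consumes an iterator in two passes of next(it, None): the first loop discards start items, the second collects at most stop - start. A standalone sketch, with names that are not in the original:

    def slice_iterable(iterable, start, stop):
        # Emulate [start:stop] on a plain iterator: skip start items, then
        # take at most stop - start, stopping early if the source runs dry.
        # None is used as the exhaustion sentinel, as in the code above,
        # which assumes the items themselves are never None.
        it = iter(iterable)
        ys = []
        for _ in range(start):
            if next(it, None) is None:
                break
        for _ in range(stop - start):
            y = next(it, None)
            if y is None:
                break
            ys.append(y)
        return ys

    assert slice_iterable([10, 20, 30, 40], 1, 3) == [20, 30]
    assert slice_iterable([10], 2, 5) == []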
@@ -1030,13 +1030,13 @@ class _spanset(abstractsmartset):
             return self.fastdesc()

     def fastasc(self):
-        iterrange = pycompat.xrange(self._start, self._end)
+        iterrange = range(self._start, self._end)
         if self._hiddenrevs:
             return self._iterfilter(iterrange)
         return iter(iterrange)

     def fastdesc(self):
-        iterrange = pycompat.xrange(self._end - 1, self._start - 1, -1)
+        iterrange = range(self._end - 1, self._start - 1, -1)
         if self._hiddenrevs:
             return self._iterfilter(iterrange)
         return iter(iterrange)
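fastasc and fastdesc walk the same half-open span [start, end) in opposite directions. A quick standalone check (illustrative only) that the descending range covers exactly the same revisions in reverse:

    start, end = 3, 8
    ascending = range(start, end)
    descending = range(end - 1, start - 1, -1)
    assert list(descending) == list(reversed(ascending))
    assert set(ascending) == set(descending) == {3, 4, 5, 6, 7}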
@@ -150,7 +150,7 @@ def _buildencodefun():
     def decode(s):
         i = 0
         while i < len(s):
-            for l in pycompat.xrange(1, 4):
+            for l in range(1, 4):
                 try:
                     yield dmap[s[i : i + l]]
                     i += l
@@ -161,9 +161,7 @@ def _buildencodefun():
                 raise KeyError

     return (
-        lambda s: b''.join(
-            [cmap[s[c : c + 1]] for c in pycompat.xrange(len(s))]
-        ),
+        lambda s: b''.join([cmap[s[c : c + 1]] for c in range(len(s))]),
         lambda s: b''.join(list(decode(s))),
     )

@@ -200,7 +198,7 @@ def _buildlowerencodefun():
     'the~07quick~adshot'
     """
     xchr = pycompat.bytechr
-    cmap = {xchr(x): xchr(x) for x in pycompat.xrange(127)}
+    cmap = {xchr(x): xchr(x) for x in range(127)}
     for x in _reserved():
         cmap[xchr(x)] = b"~%02x" % x
     for x in range(ord(b"A"), ord(b"Z") + 1):
@@ -426,7 +426,7 @@ def consumev1(repo, fp, filecount, bytec

     with repo.transaction(b'clone'):
         with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
-            for i in pycompat.xrange(filecount):
+            for i in range(filecount):
                 # XXX doesn't support '\n' or '\r' in filenames
                 l = fp.readline()
                 try:
@@ -140,7 +140,7 @@ def commondir(filelist):
     b = b[: len(a)]
     if a == b:
         return a
-    for i in pycompat.xrange(len(a)):
+    for i in range(len(a)):
         if a[i] != b[i]:
             return a[:i]
     return a
@@ -311,7 +311,7 @@ def indent(text, prefix, firstline=b''):
     endswithnewline = text[-1:] == b'\n'

     def indenter():
-        for i in pycompat.xrange(num_lines):
+        for i in range(num_lines):
             l = lines[i]
             if l.strip():
                 yield prefix if i else firstline
@@ -12,7 +12,6 @@ from .i18n import _
 from .node import short
 from . import (
     error,
-    pycompat,
 )


@@ -116,7 +115,7 @@ def findcommonincoming(repo, remote, hea
         repo.ui.debug(
             b"request %d: %s\n" % (reqcnt, b" ".join(map(short, r)))
         )
-        for p in pycompat.xrange(0, len(r), 10):
+        for p in range(0, len(r), 10):
             with remote.commandexecutor() as e:
                 subset = r[p : p + 10]
                 if audit is not None:
@@ -19,7 +19,6 @@ from .. import (
     dagop,
     error,
     mdiff,
-    pycompat,
 )
 from ..interfaces import repository
 from ..revlogutils import sidedata as sidedatamod
@@ -181,7 +180,7 @@ def iterrevs(storelen, start=0, stop=Non
     else:
         stop = storelen

-    return pycompat.xrange(start, stop, step)
+    return range(start, stop, step)


 def fileidlookup(store, fileid, identifier):
@@ -750,7 +750,7 @@ def _MBTextWrapper(**kwargs):
         def _cutdown(self, ucstr, space_left):
             l = 0
             colwidth = encoding.ucolwidth
-            for i in pycompat.xrange(len(ucstr)):
+            for i in range(len(ucstr)):
                 l += colwidth(ucstr[i])
                 if space_left < l:
                     return (ucstr[:i], ucstr[i:])
@@ -732,7 +732,7 @@ def unlink(f):
     # callers to recreate f immediately while having other readers do their
     # implicit zombie filename blocking on a temporary name.

-    for tries in pycompat.xrange(10):
+    for tries in range(10):
         temp = b'%s-%08x' % (f, random.randint(0, 0xFFFFFFFF))
         try:
             os.rename(f, temp)  # raises OSError EEXIST if temp exists
@@ -390,14 +390,14 @@ class sshv1protocolhandler:
     def getargs(self, args):
         data = {}
         keys = args.split()
-        for n in pycompat.xrange(len(keys)):
+        for n in range(len(keys)):
             argline = self._fin.readline()[:-1]
             arg, l = argline.split()
             if arg not in keys:
                 raise error.Abort(_(b"unexpected parameter %r") % arg)
             if arg == b'*':
                 star = {}
-                for k in pycompat.xrange(int(l)):
+                for k in range(int(l)):
                     argline = self._fin.readline()[:-1]
                     arg, l = argline.split()
                     val = self._fin.read(int(l))
@@ -519,7 +519,7 @@ class wirepeer(repository.peer):
     def between(self, pairs):
         batch = 8  # avoid giant requests
         r = []
-        for i in pycompat.xrange(0, len(pairs), batch):
+        for i in range(0, len(pairs), batch):
             n = b" ".join(
                 [
                     wireprototypes.encodelist(p, b'-')
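between issues its wire requests in groups of batch pairs; stepping a range by batch yields the start index of each group. A standalone sketch of that chunking pattern, with hypothetical names:

    def chunked(items, size):
        # Yield consecutive slices of at most size items, the same way the
        # loop above walks pairs in steps of batch.
        for i in range(0, len(items), size):
            yield items[i : i + size]

    assert list(chunked(list(range(19)), 8)) == [
        list(range(0, 8)),
        list(range(8, 16)),
        list(range(16, 19)),
    ]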
@@ -267,7 +267,7 @@ class datapacktestsbase:
         revisions = []
         blobs = {}
         total = basepack.SMALLFANOUTCUTOFF + 1
-        for i in pycompat.xrange(total):
+        for i in range(total):
             filename = b"filename-%d" % i
             content = filename
             node = self.getHash(content)
@@ -357,7 +357,7 @@ class datapacktestsbase:
         ]
         for packsize in packsizes:
             revisions = []
-            for i in pycompat.xrange(packsize):
+            for i in range(packsize):
                 filename = b"filename-%d" % i
                 content = b"content-%d" % i
                 node = self.getHash(content)
@@ -283,7 +283,7 @@ class histpacktests(unittest.TestCase):
         This causes it to use a 2^16 fanout table instead."""
         total = basepack.SMALLFANOUTCUTOFF + 1
         revisions = []
-        for i in pycompat.xrange(total):
+        for i in range(total):
             filename = b"foo-%d" % i
             node = self.getFakeHash()
             p1 = self.getFakeHash()