@@ -70,11 +70,11 @@ class TestGetRepoChangeset(object):
         result = response.json['result']
         assert result
         assert len(result) == limit
-        for x in xrange(limit):
+        for x in range(limit):
             assert result[x]['revision'] == x

         if details == 'full':
-            for x in xrange(limit):
+            for x in range(limit):
                 assert 'bookmarks' in result[x]['refs']
                 assert 'branches' in result[x]['refs']
                 assert 'tags' in result[x]['refs']
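This changeset is part of a Python 2 to 3 port: `xrange` was removed in Python 3, where `range` itself returns a lazy sequence rather than a materialized list. A minimal sketch of the behavior the updated tests rely on:

    # In Python 2, range() built a full list and xrange() was the lazy variant.
    # In Python 3, range() returns a lazy sequence object of its own.
    limit = 5
    r = range(limit)
    assert list(r) == [0, 1, 2, 3, 4]   # materializes only on demand
    assert 3 in r                       # constant-time membership for ranges
    assert r[2] == 2                    # ranges support indexing and slicing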
@@ -98,7 +98,7 @@ class TestGetRepoChangeset(object):
         result = response.json['result']
         assert result
         assert len(result) == limit
-        for i in xrange(limit):
+        for i in range(limit):
             assert result[i]['revision'] == int(expected_revision) + i

     @pytest.mark.parametrize("details", ['basic', 'extended', 'full'])
@@ -126,7 +126,7 @@ class TestGetRepoChangeset(object):
         result = response.json['result']
         assert result
         assert len(result) == limit
-        for i in xrange(limit):
+        for i in range(limit):
             assert result[i]['revision'] == int(expected_revision) + i

     @pytest.mark.parametrize("details", ['basic', 'extended', 'full'])
@@ -49,7 +49,7 @@ class StatsClientBase(object):
         statsd = StatsdClient.statsd
         with statsd.timer('bucket_name', auto_send=True) as tmr:
             # This block will be timed.
-            for i in xrange(0, 100000):
+            for i in range(0, 100000):
                 i ** 2
             # you can access time here...
             elapsed_ms = tmr.ms
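For context, the timed block in this docstring relies on a context manager that records elapsed wall time. A minimal illustrative sketch, not the actual StatsdClient implementation (the class name here is hypothetical):

    import time

    class SketchTimer(object):
        # Hypothetical stand-in for statsd.timer(...): times the `with` block.
        def __enter__(self):
            self._start = time.monotonic()
            return self

        def __exit__(self, *exc_info):
            # Expose the elapsed time in milliseconds, like `tmr.ms` above.
            self.ms = (time.monotonic() - self._start) * 1000.0
            return False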
@@ -132,7 +132,7 @@ def in_filter_generator(qry, items, limi
         items = [-1]

     parts = []
-    for chunk in xrange(0, len(items), limit):
+    for chunk in range(0, len(items), limit):
         parts.append(
             qry.in_(items[chunk: chunk + limit])
         )
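`in_filter_generator` exists because databases cap the size of a single `IN (...)` clause, so one oversized filter is split into several OR-ed chunks. The chunking step itself is plain slicing, as this standalone sketch shows:

    def chunks(items, limit=500):
        # Yield slices of at most `limit` items, mirroring the
        # range(0, len(items), limit) stepping in the diff above.
        for start in range(0, len(items), limit):
            yield items[start:start + limit]

    assert list(chunks([1, 2, 3, 4, 5, 6, 7], limit=3)) == [[1, 2, 3], [4, 5, 6], [7]]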
@@ -2215,7 +2215,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -132,7 +132,7 @@ def in_filter_generator(qry, items, limi
         items = [-1]

     parts = []
-    for chunk in xrange(0, len(items), limit):
+    for chunk in range(0, len(items), limit):
         parts.append(
             qry.in_(items[chunk: chunk + limit])
         )
@@ -2281,7 +2281,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -131,7 +131,7 @@ def in_filter_generator(qry, items, limi
         items = [-1]

     parts = []
-    for chunk in xrange(0, len(items), limit):
+    for chunk in range(0, len(items), limit):
         parts.append(
             qry.in_(items[chunk: chunk + limit])
         )
@@ -2329,7 +2329,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -131,7 +131,7 @@ def in_filter_generator(qry, items, limi
         items = [-1]

     parts = []
-    for chunk in xrange(0, len(items), limit):
+    for chunk in range(0, len(items), limit):
         parts.append(
             qry.in_(items[chunk: chunk + limit])
         )
@@ -2352,7 +2352,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -131,7 +131,7 @@ def in_filter_generator(qry, items, limi
         items = [-1]

     parts = []
-    for chunk in xrange(0, len(items), limit):
+    for chunk in range(0, len(items), limit):
         parts.append(
             qry.in_(items[chunk: chunk + limit])
         )
@@ -2352,7 +2352,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -136,7 +136,7 @@ def in_filter_generator(qry, items, limi
         items = [-1]

     parts = []
-    for chunk in xrange(0, len(items), limit):
+    for chunk in range(0, len(items), limit):
         parts.append(
             qry.in_(items[chunk: chunk + limit])
         )
@@ -2404,7 +2404,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -136,7 +136,7 @@ def in_filter_generator(qry, items, limi
         items = [-1]

     parts = []
-    for chunk in xrange(0, len(items), limit):
+    for chunk in range(0, len(items), limit):
         parts.append(
             qry.in_(items[chunk: chunk + limit])
         )
@@ -2489,7 +2489,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -136,7 +136,7 @@ def in_filter_generator(qry, items, limi
         items = [-1]

     parts = []
-    for chunk in xrange(0, len(items), limit):
+    for chunk in range(0, len(items), limit):
         parts.append(
             qry.in_(items[chunk: chunk + limit])
         )
@@ -2495,7 +2495,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -141,7 +141,7 @@ def in_filter_generator(qry, items, limi
         items = [-1]

     parts = []
-    for chunk in xrange(0, len(items), limit):
+    for chunk in range(0, len(items), limit):
         parts.append(
             qry.in_(items[chunk: chunk + limit])
         )
@@ -2526,7 +2526,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -1916,7 +1916,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -1919,7 +1919,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -1918,7 +1918,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -1920,7 +1920,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -1920,7 +1920,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -1963,7 +1963,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -1964,7 +1964,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -2151,7 +2151,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -288,13 +288,13 @@ class diff_match_patch:
     k1end = 0
     k2start = 0
     k2end = 0
-    for d in xrange(max_d):
+    for d in range(max_d):
         # Bail out if deadline is reached.
         if time.time() > deadline:
             break

         # Walk the front path one step.
-        for k1 in xrange(-d + k1start, d + 1 - k1end, 2):
+        for k1 in range(-d + k1start, d + 1 - k1end, 2):
             k1_offset = v_offset + k1
             if k1 == -d or (k1 != d and
                     v1[k1_offset - 1] < v1[k1_offset + 1]):
@@ -323,7 +323,7 @@ class diff_match_patch:
             return self.diff_bisectSplit(text1, text2, x1, y1, deadline)

         # Walk the reverse path one step.
-        for k2 in xrange(-d + k2start, d + 1 - k2end, 2):
+        for k2 in range(-d + k2start, d + 1 - k2end, 2):
             k2_offset = v_offset + k2
             if k2 == -d or (k2 != d and
                     v2[k2_offset - 1] < v2[k2_offset + 1]):
@@ -446,7 +446,7 @@ class diff_match_patch:
       diffs: Array of diff tuples.
       lineArray: Array of unique strings.
     """
-    for x in xrange(len(diffs)):
+    for x in range(len(diffs)):
         text = []
         for char in diffs[x][1]:
             text.append(lineArray[ord(char)])
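`diff_charsToLines` undoes the lines-to-chars encoding used to speed up line-mode diffs: each character's code point indexes into `lineArray`. A toy round-trip of the same decoding:

    lineArray = ["", "alpha\n", "beta\n"]     # index 0 is intentionally unused
    encoded = "\x01\x02\x01"                  # alpha, beta, alpha
    decoded = "".join(lineArray[ord(char)] for char in encoded)
    assert decoded == "alpha\nbeta\nalpha\n"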
@@ -1042,7 +1042,7 @@ class diff_match_patch:
     chars2 = 0
     last_chars1 = 0
     last_chars2 = 0
-    for x in xrange(len(diffs)):
+    for x in range(len(diffs)):
         (op, text) = diffs[x]
         if op != self.DIFF_INSERT:  # Equality or deletion.
             chars1 += len(text)
@@ -1301,7 +1301,7 @@ class diff_match_patch:
     bin_max = len(pattern) + len(text)
     # Empty initialization added to appease pychecker.
     last_rd = None
-    for d in xrange(len(pattern)):
+    for d in range(len(pattern)):
         # Scan for the best match each iteration allows for one more error.
         # Run a binary search to determine how far from 'loc' we can stray at
         # this error level.
@@ -1321,7 +1321,7 @@ class diff_match_patch:

         rd = [0] * (finish + 2)
         rd[finish + 1] = (1 << d) - 1
-        for j in xrange(finish, start - 1, -1):
+        for j in range(finish, start - 1, -1):
             if len(text) <= j - 1:
                 # Out of range.
                 charMatch = 0
@@ -1364,7 +1364,7 @@ class diff_match_patch:
     s = {}
     for char in pattern:
         s[char] = 0
-    for i in xrange(len(pattern)):
+    for i in range(len(pattern)):
         s[pattern[i]] |= 1 << (len(pattern) - i - 1)
     return s

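This loop builds the Bitap bitmask table: each pattern character maps to an integer with bit `len(pattern) - i - 1` set for every position `i` where that character occurs. The same construction, worked on a tiny pattern (this is the method above minus `self`):

    def match_alphabet(pattern):
        s = {}
        for char in pattern:
            s[char] = 0
        for i in range(len(pattern)):
            s[pattern[i]] |= 1 << (len(pattern) - i - 1)
        return s

    # 'a' occupies positions 0 and 2 of "aba" -> bits 2 and 0 -> 0b101
    assert match_alphabet("aba") == {'a': 0b101, 'b': 0b010}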
@@ -1473,7 +1473,7 @@ class diff_match_patch:
     char_count2 = 0  # Number of characters into the text2 string.
     prepatch_text = text1  # Recreate the patches to determine context info.
     postpatch_text = text1
-    for x in xrange(len(diffs)):
+    for x in range(len(diffs)):
         (diff_type, diff_text) = diffs[x]
         if len(patch.diffs) == 0 and diff_type != self.DIFF_EQUAL:
             # A new patch starts here.
@@ -1646,7 +1646,7 @@ class diff_match_patch:
     """
     paddingLength = self.Patch_Margin
     nullPadding = ""
-    for x in xrange(1, paddingLength + 1):
+    for x in range(1, paddingLength + 1):
         nullPadding += chr(x)

     # Bump all the patches forward.
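The padding string is simply the low control characters U+0001 upward, chosen because they are unlikely to occur in real text. With diff_match_patch's default margin of 4:

    paddingLength = 4  # diff_match_patch's default Patch_Margin
    nullPadding = ""
    for x in range(1, paddingLength + 1):
        nullPadding += chr(x)
    assert nullPadding == "\x01\x02\x03\x04"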
@@ -1705,7 +1705,7 @@ class diff_match_patch:
         # Python has the option of not splitting strings due to its ability
         # to handle integers of arbitrary precision.
         return
-    for x in xrange(len(patches)):
+    for x in range(len(patches)):
         if patches[x].length1 <= patch_size:
             continue
         bigpatch = patches[x]
@@ -581,7 +581,7 @@ def unique_color_generator(n=10000, satu
     golden_ratio = 0.618033988749895
     h = 0.22717784590367374

-    for _ in xrange(n):
+    for _ in range(n):
         h += golden_ratio
         h %= 1
         HSV_tuple = [h, saturation, lightness]
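Stepping the hue by the golden-ratio conjugate keeps consecutive colors maximally spread around the hue wheel. A runnable sketch of the same idea; the `colorsys` conversion and the 255 scaling are illustrative assumptions, not necessarily what the original helper does:

    import colorsys

    def unique_colors(n, saturation=0.5, lightness=0.95):
        golden_ratio = 0.618033988749895
        h = 0.22717784590367374
        for _ in range(n):
            h = (h + golden_ratio) % 1  # hop around the hue wheel
            r, g, b = colorsys.hsv_to_rgb(h, saturation, lightness)
            yield tuple(int(round(c * 255)) for c in (r, g, b))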
@@ -22,7 +22,7 @@ import sys
 import logging


-BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = xrange(30, 38)
+BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(30, 38)

 # Sequences
 RESET_SEQ = "\033[0m"
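The unpacking works because `range(30, 38)` yields exactly eight values, which are the standard ANSI foreground color codes 30-37. Usage with the escape sequences defined alongside (the exact shape of `COLOR_SEQ` is an assumption here):

    BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(30, 38)
    RESET_SEQ = "\033[0m"
    COLOR_SEQ = "\033[%dm"   # assumed shape of the color sequence constant
    print(COLOR_SEQ % GREEN + "ok" + RESET_SEQ)  # prints "ok" in green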
@@ -1259,7 +1259,7 @@ class BaseCommit(object):

         :param branch: show commits within the given named branch
         """
-        indexes = xrange(self.idx + 1, self.repository.count())
+        indexes = range(self.idx + 1, self.repository.count())
         return self._find_next(indexes, branch)

     def prev(self, branch=None):
@@ -1269,7 +1269,7 @@ class BaseCommit(object):

         :param branch: show commit within the given named branch
         """
-        indexes = xrange(self.idx - 1, -1, -1)
+        indexes = range(self.idx - 1, -1, -1)
         return self._find_next(indexes, branch)

     def _find_next(self, indexes, branch=None):
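Both navigation helpers hand `_find_next` a lazy index sequence: `next()` counts upward from the current position, while `prev()` counts down to zero, e.g.:

    idx, total = 4, 10
    assert list(range(idx + 1, total))[:3] == [5, 6, 7]   # next()
    assert list(range(idx - 1, -1, -1)) == [3, 2, 1, 0]   # prev()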
@@ -102,7 +102,7 @@ class SubversionRepository(base.BaseRepo
     @CachedProperty
     def commit_ids(self):
         head = self._remote.lookup(None)
-        return [str(r) for r in xrange(1, head + 1)]
+        return [str(r) for r in range(1, head + 1)]

     def _rebuild_cache(self, commit_ids):
         pass
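Subversion numbers revisions as a dense 1-based sequence, so the commit id list is just the stringified counters up to the head revision:

    head = 3
    assert [str(r) for r in range(1, head + 1)] == ['1', '2', '3']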
@@ -142,7 +142,7 @@ def in_filter_generator(qry, items, limi
         items = [-1]

     parts = []
-    for chunk in xrange(0, len(items), limit):
+    for chunk in range(0, len(items), limit):
         parts.append(
             qry.in_(items[chunk: chunk + limit])
         )
@@ -2548,7 +2548,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -55,20 +55,20 @@ def data():

 def test_reuse_app_no_data(repeat, vcsserver_http_echo_app):
     app = vcs_http_app(vcsserver_http_echo_app)
-    for x in xrange(repeat / 10):
+    for x in range(repeat / 10):
         response = app.post('/')
         assert response.status_code == 200


 def test_reuse_app_with_data(data, repeat, vcsserver_http_echo_app):
     app = vcs_http_app(vcsserver_http_echo_app)
-    for x in xrange(repeat / 10):
+    for x in range(repeat / 10):
         response = app.post('/', params=data)
         assert response.status_code == 200


 def test_create_app_per_request_no_data(repeat, vcsserver_http_echo_app):
-    for x in xrange(repeat / 10):
+    for x in range(repeat / 10):
         app = vcs_http_app(vcsserver_http_echo_app)
         response = app.post('/')
         assert response.status_code == 200
@@ -76,7 +76,7 @@ def test_create_app_per_request_no_data(

 def test_create_app_per_request_with_data(
         data, repeat, vcsserver_http_echo_app):
-    for x in xrange(repeat / 10):
+    for x in range(repeat / 10):
         app = vcs_http_app(vcsserver_http_echo_app)
         response = app.post('/', params=data)
         assert response.status_code == 200
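One caveat with these test loops: under Python 3's true division, `repeat / 10` yields a float, and `range()` rejects floats with a TypeError, so a complete port would need floor division here:

    repeat = 100
    # range(repeat / 10) raises TypeError on Python 3; use // instead.
    for x in range(repeat // 10):
        pass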
@@ -125,7 +125,7 @@ class Repository(object):
         log.error('api: {}'.format(e))

     def create_commits(self, number, file_size):
-        for i in xrange(number):
+        for i in range(number):
             file_name = self.FILE_NAME_TEMPLATE.format(i)
             log.debug("Create commit[{}] {}".format(self.name, file_name))
             self._create_file(file_name, file_size)
@@ -275,7 +275,7 @@ class Benchmark(object):

     def _create_repos(self):
         log.info("Creating repositories...")
-        for i in xrange(self.config.repositories):
+        for i in range(self.config.repositories):
             self.git_repos.append(self._create_repo('git', i))
             self.hg_repos.append(self._create_repo('hg', i))

@@ -830,7 +830,7 @@ class VcsBackend(object):

         commits = commits or [
             {'message': 'Commit %s of %s' % (x, repo_name)}
-            for x in xrange(number_of_commits)]
+            for x in range(number_of_commits)]
         _add_commits_to_repo(repo, commits)
         return repo

@@ -173,7 +173,7 @@ def generate_repo_with_commits(vcs_repo)

 def commit_generator(num):
     start_date = datetime.datetime(2010, 1, 1, 20)
-    for x in xrange(num):
+    for x in range(num):
         yield {
             'message': 'Commit %d' % x,
             'author': 'Joe Doe <joe.doe@example.com>',
@@ -56,7 +56,7 @@ class TestCommitsInNonEmptyRepo(BackendT
     @classmethod
     def _get_commits(cls):
         start_date = datetime.datetime(2010, 1, 1, 20)
-        for x in xrange(5):
+        for x in range(5):
             yield {
                 'message': 'Commit %d' % x,
                 'author': 'Joe Doe <joe.doe@example.com>',
@@ -208,7 +208,7 @@ class TestCommits(BackendTestMixin):
     @classmethod
     def _get_commits(cls):
         start_date = datetime.datetime(2010, 1, 1, 20)
-        for x in xrange(5):
+        for x in range(5):
             yield {
                 'message': u'Commit %d' % x,
                 'author': u'Joe Doe <joe.doe@example.com>',
@@ -321,7 +321,7 @@ class TestCommits(BackendTestMixin):
     def test_get_filenodes_generator(self):
         tip = self.repo.get_commit()
         filepaths = [node.path for node in tip.get_filenodes_generator()]
-        assert filepaths == ['file_%d.txt' % x for x in xrange(5)]
+        assert filepaths == ['file_%d.txt' % x for x in range(5)]

     def test_get_file_annotate(self):
         file_added_commit = self.repo.get_commit(commit_idx=3)
@@ -33,7 +33,7 @@ class TestGetitem(BackendTestMixin):
     @classmethod
     def _get_commits(cls):
         start_date = datetime.datetime(2010, 1, 1, 20)
-        for x in xrange(5):
+        for x in range(5):
             yield {
                 'message': 'Commit %d' % x,
                 'author': 'Joe Doe <joe.doe@example.com>',
@@ -55,7 +55,7 @@ class TestGetitem(BackendTestMixin):
         assert self.repo[offset].message == message

     def test_returns_correct_items(self):
-        commits = [self.repo[x] for x in xrange(len(self.repo.commit_ids))]
+        commits = [self.repo[x] for x in range(len(self.repo.commit_ids))]
         assert commits == list(self.repo.get_commits())

     def test_raises_for_next_commit(self):
@@ -30,7 +30,7 @@ class TestGetslice(BackendTestMixin):
     @classmethod
     def _get_commits(cls):
         start_date = datetime.datetime(2010, 1, 1, 20)
-        for x in xrange(5):
+        for x in range(5):
             yield {
                 'message': 'Commit %d' % x,
                 'author': 'Joe Doe <joe.doe@example.com>',
@@ -309,7 +309,7 @@ class TestInMemoryCommit(BackendTestMixi
     def test_multiple_commits(self):
         N = 3  # number of commits to perform
         last = None
-        for x in xrange(N):
+        for x in range(N):
             fname = 'file%s' % str(x).rjust(5, '0')
             content = 'foobar\n' * x
             node = FileNode(fname, content=content)
@@ -270,6 +270,6 @@ class TestNodesCommits(BackendTestMixin)
         repo = generate_repo_with_commits(20)
         last_commit = repo.get_commit()

-        for x in xrange(3):
+        for x in range(3):
             node = last_commit.get_node('file_%s.txt' % x)
             assert node.last_commit == repo[x]
@@ -485,7 +485,7 @@ class TestRepositoryStrip(BackendTestMix
             ],
         },
     ]
-    for x in xrange(10):
+    for x in range(10):
         commit_data = {
             'message': 'Changed foobar - commit%s' % x,
             'author': 'Jane Doe <jane.doe@example.com>',
@@ -107,7 +107,7 @@ def get_normalized_path(path):
         return get_normalized_path(newpath)
     else:
         start = int(m.group(1)[-5:]) + 1
-        for x in xrange(start, 10000):
+        for x in range(start, 10000):
             newname = name[:-5] + str(x).rjust(5, '0')
             newpath = os.path.join(dir, newname)
             if ext:
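`str(x).rjust(5, '0')` zero-pads the counter to five digits, so the generated names sort lexicographically in numeric order:

    assert str(42).rjust(5, '0') == '00042'
    assert '00042' < '00100'   # lexicographic order matches numeric order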