@@ -70,11 +70,11 @@ class TestGetRepoChangeset(object):
         result = response.json['result']
         assert result
         assert len(result) == limit
-        for x in xrange(limit):
+        for x in range(limit):
             assert result[x]['revision'] == x

         if details == 'full':
-            for x in xrange(limit):
+            for x in range(limit):
                 assert 'bookmarks' in result[x]['refs']
                 assert 'branches' in result[x]['refs']
                 assert 'tags' in result[x]['refs']
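Every hunk in this changeset is the same mechanical Python 3 substitution: `xrange` is gone, and the `range` builtin is now the lazy, constant-memory sequence that `xrange` used to be. A quick interpreter check (not part of the diff) of the properties the ported code relies on:

import sys

r = range(5)
assert list(r) == [0, 1, 2, 3, 4]
assert list(r) == [0, 1, 2, 3, 4]  # re-iterable, unlike a generator
# Lazy: a huge range costs the same handful of bytes as a tiny one.
assert sys.getsizeof(range(10 ** 9)) == sys.getsizeof(range(10))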
@@ -98,7 +98,7 @@ class TestGetRepoChangeset(object):
         result = response.json['result']
         assert result
         assert len(result) == limit
-        for i in xrange(limit):
+        for i in range(limit):
             assert result[i]['revision'] == int(expected_revision) + i

     @pytest.mark.parametrize("details", ['basic', 'extended', 'full'])
@@ -126,7 +126,7 @@ class TestGetRepoChangeset(object):
         result = response.json['result']
         assert result
         assert len(result) == limit
-        for i in xrange(limit):
+        for i in range(limit):
             assert result[i]['revision'] == int(expected_revision) + i

     @pytest.mark.parametrize("details", ['basic', 'extended', 'full'])
@@ -49,7 +49,7 @@ class StatsClientBase(object):
         statsd = StatsdClient.statsd
         with statsd.timer('bucket_name', auto_send=True) as tmr:
             # This block will be timed.
-            for i in xrange(0, 100000):
+            for i in range(0, 100000):
                 i ** 2
         # you can access time here...
         elapsed_ms = tmr.ms
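The lines above are a docstring usage example, so porting them keeps the documentation honest for Python 3. For orientation, a minimal sketch of what such a timer context manager could look like; the real `StatsdClient` presumably ships the elapsed milliseconds to statsd, while this hypothetical stand-in only records them:

import time
from contextlib import contextmanager

@contextmanager
def timer(bucket_name, auto_send=True):
    # Hypothetical stand-in for statsd.timer; only the timing logic is shown.
    class _Timer(object):
        ms = None
    tmr = _Timer()
    start = time.perf_counter()
    try:
        yield tmr
    finally:
        tmr.ms = (time.perf_counter() - start) * 1000.0
        if auto_send:
            # The real client would emit '<bucket>:<ms>|ms' over UDP here.
            print('%s: %.2f ms' % (bucket_name, tmr.ms))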
@@ -132,7 +132,7 @@ def in_filter_generator(qry, items, limi
         items = [-1]

     parts = []
-    for chunk in xrange(0, len(items), limit):
+    for chunk in range(0, len(items), limit):
         parts.append(
             qry.in_(items[chunk: chunk + limit])
         )
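This hunk recurs below, once per versioned copy of the model module; the loop is the standard chunked-slice idiom. Stepping `range` by `limit` yields each slice's start index, and Python slicing absorbs the final short chunk. In isolation:

items = list(range(12))
limit = 5
chunks = [items[chunk: chunk + limit] for chunk in range(0, len(items), limit)]
assert chunks == [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11]]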
@@ -2215,7 +2215,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
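Like the previous hunk, this one repeats across the versioned copies of the model. The `status_results += statuses.filter(...)` pattern accumulates each chunk's rows into one plain list; `size = 500` presumably stays under database caps on IN-list length (Oracle, notably, allows at most 1000 values per expression list). A toy version with a hypothetical function standing in for the SQLAlchemy query:

def fetch_statuses(wanted_revisions):
    # Hypothetical stand-in for statuses.filter(ChangesetStatus.revision.in_(...)).
    db_rows = {1: 'approved', 3: 'rejected', 7: 'under_review'}
    return [(rev, db_rows[rev]) for rev in wanted_revisions if rev in db_rows]

revisions = list(range(10))
size = 4  # the real code uses 500
status_results = []
for chunk in range(0, len(revisions), size):
    status_results += fetch_statuses(revisions[chunk: chunk + size])
assert status_results == [(1, 'approved'), (3, 'rejected'), (7, 'under_review')]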
@@ -132,7 +132,7 @@ def in_filter_generator(qry, items, limi
         items = [-1]

     parts = []
-    for chunk in xrange(0, len(items), limit):
+    for chunk in range(0, len(items), limit):
         parts.append(
             qry.in_(items[chunk: chunk + limit])
         )
@@ -2281,7 +2281,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -131,7 +131,7 @@ def in_filter_generator(qry, items, limi
         items = [-1]

     parts = []
-    for chunk in xrange(0, len(items), limit):
+    for chunk in range(0, len(items), limit):
         parts.append(
             qry.in_(items[chunk: chunk + limit])
         )
@@ -2329,7 +2329,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -131,7 +131,7 @@ def in_filter_generator(qry, items, limi
         items = [-1]

     parts = []
-    for chunk in xrange(0, len(items), limit):
+    for chunk in range(0, len(items), limit):
         parts.append(
             qry.in_(items[chunk: chunk + limit])
         )
@@ -2352,7 +2352,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -131,7 +131,7 @@ def in_filter_generator(qry, items, limi
         items = [-1]

     parts = []
-    for chunk in xrange(0, len(items), limit):
+    for chunk in range(0, len(items), limit):
         parts.append(
             qry.in_(items[chunk: chunk + limit])
         )
@@ -2352,7 +2352,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -136,7 +136,7 @@ def in_filter_generator(qry, items, limi
         items = [-1]

     parts = []
-    for chunk in xrange(0, len(items), limit):
+    for chunk in range(0, len(items), limit):
         parts.append(
             qry.in_(items[chunk: chunk + limit])
         )
@@ -2404,7 +2404,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -136,7 +136,7 @@ def in_filter_generator(qry, items, limi
         items = [-1]

     parts = []
-    for chunk in xrange(0, len(items), limit):
+    for chunk in range(0, len(items), limit):
         parts.append(
             qry.in_(items[chunk: chunk + limit])
         )
@@ -2489,7 +2489,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -136,7 +136,7 @@ def in_filter_generator(qry, items, limi
         items = [-1]

     parts = []
-    for chunk in xrange(0, len(items), limit):
+    for chunk in range(0, len(items), limit):
         parts.append(
             qry.in_(items[chunk: chunk + limit])
         )
@@ -2495,7 +2495,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -141,7 +141,7 @@ def in_filter_generator(qry, items, limi
         items = [-1]

     parts = []
-    for chunk in xrange(0, len(items), limit):
+    for chunk in range(0, len(items), limit):
         parts.append(
             qry.in_(items[chunk: chunk + limit])
         )
@@ -2526,7 +2526,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -1916,7 +1916,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -1919,7 +1919,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -1918,7 +1918,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -1920,7 +1920,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -1920,7 +1920,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -1963,7 +1963,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -1964,7 +1964,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -2151,7 +2151,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -288,13 +288,13 @@ class diff_match_patch:
    k1end = 0
    k2start = 0
    k2end = 0
-   for d in xrange(max_d):
+   for d in range(max_d):
      # Bail out if deadline is reached.
      if time.time() > deadline:
        break

      # Walk the front path one step.
-     for k1 in xrange(-d + k1start, d + 1 - k1end, 2):
+     for k1 in range(-d + k1start, d + 1 - k1end, 2):
        k1_offset = v_offset + k1
        if k1 == -d or (k1 != d and
            v1[k1_offset - 1] < v1[k1_offset + 1]):
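This method is the front/reverse path walk of Myers' O(ND) diff bisection. The stride of 2 in the inner `range` encodes a parity invariant: after `d` edit steps, every reachable diagonal `k = x - y` has the same parity as `d`, so half the diagonals can be skipped each round:

d = 3
assert list(range(-d, d + 1, 2)) == [-3, -1, 1, 3]
d = 4
assert list(range(-d, d + 1, 2)) == [-4, -2, 0, 2, 4]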
@@ -323,7 +323,7 @@ class diff_match_patch:
          return self.diff_bisectSplit(text1, text2, x1, y1, deadline)

      # Walk the reverse path one step.
-     for k2 in xrange(-d + k2start, d + 1 - k2end, 2):
+     for k2 in range(-d + k2start, d + 1 - k2end, 2):
        k2_offset = v_offset + k2
        if k2 == -d or (k2 != d and
            v2[k2_offset - 1] < v2[k2_offset + 1]):
@@ -446,7 +446,7 @@ class diff_match_patch:
      diffs: Array of diff tuples.
      lineArray: Array of unique strings.
    """
-   for x in xrange(len(diffs)):
+   for x in range(len(diffs)):
      text = []
      for char in diffs[x][1]:
        text.append(lineArray[ord(char)])
@@ -1042,7 +1042,7 @@ class diff_match_patch:
    chars2 = 0
    last_chars1 = 0
    last_chars2 = 0
-   for x in xrange(len(diffs)):
+   for x in range(len(diffs)):
      (op, text) = diffs[x]
      if op != self.DIFF_INSERT:  # Equality or deletion.
        chars1 += len(text)
@@ -1301,7 +1301,7 @@ class diff_match_patch:
    bin_max = len(pattern) + len(text)
    # Empty initialization added to appease pychecker.
    last_rd = None
-   for d in xrange(len(pattern)):
+   for d in range(len(pattern)):
      # Scan for the best match each iteration allows for one more error.
      # Run a binary search to determine how far from 'loc' we can stray at
      # this error level.
@@ -1321,7 +1321,7 @@ class diff_match_patch:

      rd = [0] * (finish + 2)
      rd[finish + 1] = (1 << d) - 1
-     for j in xrange(finish, start - 1, -1):
+     for j in range(finish, start - 1, -1):
        if len(text) <= j - 1:
          # Out of range.
          charMatch = 0
@@ -1364,7 +1364,7 @@ class diff_match_patch:
    s = {}
    for char in pattern:
      s[char] = 0
-   for i in xrange(len(pattern)):
+   for i in range(len(pattern)):
      s[pattern[i]] |= 1 << (len(pattern) - i - 1)
    return s

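This builds the alphabet table of diff_match_patch's Bitap fuzzy matcher: each character maps to a bitmask with a 1 at every position it occupies in the pattern, most significant bit first. Traced for a small pattern:

pattern = 'aba'
s = {}
for char in pattern:
    s[char] = 0
for i in range(len(pattern)):
    s[pattern[i]] |= 1 << (len(pattern) - i - 1)
assert s == {'a': 0b101, 'b': 0b010}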
@@ -1473,7 +1473,7 @@ class diff_match_patch:
    char_count2 = 0  # Number of characters into the text2 string.
    prepatch_text = text1  # Recreate the patches to determine context info.
    postpatch_text = text1
-   for x in xrange(len(diffs)):
+   for x in range(len(diffs)):
      (diff_type, diff_text) = diffs[x]
      if len(patch.diffs) == 0 and diff_type != self.DIFF_EQUAL:
        # A new patch starts here.
@@ -1646,7 +1646,7 @@ class diff_match_patch:
    """
    paddingLength = self.Patch_Margin
    nullPadding = ""
-   for x in xrange(1, paddingLength + 1):
+   for x in range(1, paddingLength + 1):
      nullPadding += chr(x)

    # Bump all the patches forward.
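The padding string is built from low control characters, which are vanishingly unlikely to appear in real text, so the padding cannot collide with content. With diff_match_patch's default `Patch_Margin` of 4:

paddingLength = 4  # default Patch_Margin
nullPadding = ''
for x in range(1, paddingLength + 1):
    nullPadding += chr(x)
assert nullPadding == '\x01\x02\x03\x04'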
@@ -1705,7 +1705,7 @@ class diff_match_patch:
      # Python has the option of not splitting strings due to its ability
      # to handle integers of arbitrary precision.
      return
-   for x in xrange(len(patches)):
+   for x in range(len(patches)):
      if patches[x].length1 <= patch_size:
        continue
      bigpatch = patches[x]
@@ -581,7 +581,7 @@ def unique_color_generator(n=10000, satu
     golden_ratio = 0.618033988749895
     h = 0.22717784590367374

-    for _ in xrange(n):
+    for _ in range(n):
         h += golden_ratio
         h %= 1
         HSV_tuple = [h, saturation, lightness]
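Stepping the hue by the golden-ratio conjugate (mod 1) is a classic trick for generating arbitrarily many visually well-spread colors without precomputing a palette. A hedged sketch of how the generator plausibly completes each iteration; the conversion via `colorsys` is an assumption, not taken from the diff:

import colorsys

def unique_color_generator(n=10000, saturation=0.225, lightness=0.95):
    golden_ratio = 0.618033988749895
    h = 0.22717784590367374
    for _ in range(n):
        h += golden_ratio
        h %= 1
        HSV_tuple = [h, saturation, lightness]
        # Assumed finish: convert HSV to an 8-bit RGB triple.
        yield tuple(int(255 * c) for c in colorsys.hsv_to_rgb(*HSV_tuple))

first_three = list(unique_color_generator(n=3))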
@@ -22,7 +22,7 @@ import sys
 import logging


-BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = xrange(30, 38)
+BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(30, 38)

 # Sequences
 RESET_SEQ = "\033[0m"
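Tuple-unpacking still works after the port because a `range` object, like the old `xrange`, is a real sequence; 30 through 37 are the ANSI SGR foreground color codes. A minimal demonstration (`COLOR_SEQ` is assumed here as the counterpart of the `RESET_SEQ` visible in the hunk):

BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(30, 38)
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[%dm"  # assumed; standard ANSI foreground escape

print(COLOR_SEQ % GREEN + "ok" + RESET_SEQ)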
@@ -1259,7 +1259,7 @@ class BaseCommit(object):

         :param branch: show commits within the given named branch
         """
-        indexes = xrange(self.idx + 1, self.repository.count())
+        indexes = range(self.idx + 1, self.repository.count())
         return self._find_next(indexes, branch)

     def prev(self, branch=None):
@@ -1269,7 +1269,7 @@ class BaseCommit(object):

         :param branch: show commit within the given named branch
         """
-        indexes = xrange(self.idx - 1, -1, -1)
+        indexes = range(self.idx - 1, -1, -1)
         return self._find_next(indexes, branch)

     def _find_next(self, indexes, branch=None):
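A behavioral note on this pair: `range`, like `xrange`, is lazy, re-iterable, and supports a negative step, so `_find_next` can consume ascending and descending index streams identically:

count, idx = 10, 4
assert list(range(idx + 1, count)) == [5, 6, 7, 8, 9]   # next()
assert list(range(idx - 1, -1, -1)) == [3, 2, 1, 0]     # prev()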
@@ -102,7 +102,7 @@ class SubversionRepository(base.BaseRepo
     @CachedProperty
     def commit_ids(self):
         head = self._remote.lookup(None)
-        return [str(r) for r in xrange(1, head + 1)]
+        return [str(r) for r in range(1, head + 1)]

     def _rebuild_cache(self, commit_ids):
         pass
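Subversion numbers revisions consecutively from 1 up to head, so the commit ids are just the stringified closed interval; with a hypothetical head of 3:

head = 3  # hypothetical value of self._remote.lookup(None)
assert [str(r) for r in range(1, head + 1)] == ['1', '2', '3']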
@@ -142,7 +142,7 @@ def in_filter_generator(qry, items, limi
         items = [-1]

     parts = []
-    for chunk in xrange(0, len(items), limit):
+    for chunk in range(0, len(items), limit):
         parts.append(
             qry.in_(items[chunk: chunk + limit])
         )
@@ -2548,7 +2548,7 @@ class Repository(Base, BaseModel):
         # Try doing the filtering in chunks to avoid hitting limits
         size = 500
         status_results = []
-        for chunk in xrange(0, len(revisions), size):
+        for chunk in range(0, len(revisions), size):
             status_results += statuses.filter(
                 ChangesetStatus.revision.in_(
                     revisions[chunk: chunk+size])
@@ -55,20 +55,20 @@ def data():

 def test_reuse_app_no_data(repeat, vcsserver_http_echo_app):
     app = vcs_http_app(vcsserver_http_echo_app)
-    for x in xrange(repeat / 10):
+    for x in range(repeat / 10):
         response = app.post('/')
         assert response.status_code == 200


 def test_reuse_app_with_data(data, repeat, vcsserver_http_echo_app):
     app = vcs_http_app(vcsserver_http_echo_app)
-    for x in xrange(repeat / 10):
+    for x in range(repeat / 10):
         response = app.post('/', params=data)
         assert response.status_code == 200


 def test_create_app_per_request_no_data(repeat, vcsserver_http_echo_app):
-    for x in xrange(repeat / 10):
+    for x in range(repeat / 10):
         app = vcs_http_app(vcsserver_http_echo_app)
         response = app.post('/')
         assert response.status_code == 200
@@ -76,7 +76,7 @@ def test_create_app_per_request_no_data(

 def test_create_app_per_request_with_data(
         data, repeat, vcsserver_http_echo_app):
-    for x in xrange(repeat / 10):
+    for x in range(repeat / 10):
         app = vcs_http_app(vcsserver_http_echo_app)
         response = app.post('/', params=data)
         assert response.status_code == 200
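One Python 3 wrinkle these four hunks leave in place: `repeat / 10` is true division in Python 3 and produces a float, which `range()` rejects with a TypeError; floor division would be the py3-safe spelling:

repeat = 100
# range(repeat / 10) -> TypeError: 'float' object cannot be interpreted as an integer
assert list(range(repeat // 10)) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]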
@@ -125,7 +125,7 @@ class Repository(object):
         log.error('api: {}'.format(e))

     def create_commits(self, number, file_size):
-        for i in xrange(number):
+        for i in range(number):
             file_name = self.FILE_NAME_TEMPLATE.format(i)
             log.debug("Create commit[{}] {}".format(self.name, file_name))
             self._create_file(file_name, file_size)
@@ -275,7 +275,7 @@ class Benchmark(object):

     def _create_repos(self):
         log.info("Creating repositories...")
-        for i in xrange(self.config.repositories):
+        for i in range(self.config.repositories):
             self.git_repos.append(self._create_repo('git', i))
             self.hg_repos.append(self._create_repo('hg', i))

@@ -830,7 +830,7 @@ class VcsBackend(object):

         commits = commits or [
             {'message': 'Commit %s of %s' % (x, repo_name)}
-            for x in xrange(number_of_commits)]
+            for x in range(number_of_commits)]
         _add_commits_to_repo(repo, commits)
         return repo

@@ -173,7 +173,7 @@ def generate_repo_with_commits(vcs_repo)

     def commit_generator(num):
         start_date = datetime.datetime(2010, 1, 1, 20)
-        for x in xrange(num):
+        for x in range(num):
             yield {
                 'message': 'Commit %d' % x,
                 'author': 'Joe Doe <joe.doe@example.com>',
@@ -56,7 +56,7 @@ class TestCommitsInNonEmptyRepo(BackendT
     @classmethod
     def _get_commits(cls):
         start_date = datetime.datetime(2010, 1, 1, 20)
-        for x in xrange(5):
+        for x in range(5):
             yield {
                 'message': 'Commit %d' % x,
                 'author': 'Joe Doe <joe.doe@example.com>',
@@ -208,7 +208,7 @@ class TestCommits(BackendTestMixin):
     @classmethod
     def _get_commits(cls):
         start_date = datetime.datetime(2010, 1, 1, 20)
-        for x in xrange(5):
+        for x in range(5):
             yield {
                 'message': u'Commit %d' % x,
                 'author': u'Joe Doe <joe.doe@example.com>',
@@ -321,7 +321,7 @@ class TestCommits(BackendTestMixin):
     def test_get_filenodes_generator(self):
         tip = self.repo.get_commit()
         filepaths = [node.path for node in tip.get_filenodes_generator()]
-        assert filepaths == ['file_%d.txt' % x for x in xrange(5)]
+        assert filepaths == ['file_%d.txt' % x for x in range(5)]

     def test_get_file_annotate(self):
         file_added_commit = self.repo.get_commit(commit_idx=3)
@@ -33,7 +33,7 @@ class TestGetitem(BackendTestMixin):
     @classmethod
     def _get_commits(cls):
         start_date = datetime.datetime(2010, 1, 1, 20)
-        for x in xrange(5):
+        for x in range(5):
             yield {
                 'message': 'Commit %d' % x,
                 'author': 'Joe Doe <joe.doe@example.com>',
@@ -55,7 +55,7 @@ class TestGetitem(BackendTestMixin):
         assert self.repo[offset].message == message

     def test_returns_correct_items(self):
-        commits = [self.repo[x] for x in xrange(len(self.repo.commit_ids))]
+        commits = [self.repo[x] for x in range(len(self.repo.commit_ids))]
         assert commits == list(self.repo.get_commits())

     def test_raises_for_next_commit(self):
@@ -30,7 +30,7 @@ class TestGetslice(BackendTestMixin):
     @classmethod
     def _get_commits(cls):
         start_date = datetime.datetime(2010, 1, 1, 20)
-        for x in xrange(5):
+        for x in range(5):
             yield {
                 'message': 'Commit %d' % x,
                 'author': 'Joe Doe <joe.doe@example.com>',
@@ -309,7 +309,7 @@ class TestInMemoryCommit(BackendTestMixi
     def test_multiple_commits(self):
         N = 3  # number of commits to perform
         last = None
-        for x in xrange(N):
+        for x in range(N):
             fname = 'file%s' % str(x).rjust(5, '0')
             content = 'foobar\n' * x
             node = FileNode(fname, content=content)
@@ -270,6 +270,6 @@ class TestNodesCommits(BackendTestMixin)
         repo = generate_repo_with_commits(20)
         last_commit = repo.get_commit()

-        for x in xrange(3):
+        for x in range(3):
             node = last_commit.get_node('file_%s.txt' % x)
             assert node.last_commit == repo[x]
@@ -485,7 +485,7 @@ class TestRepositoryStrip(BackendTestMix
                 ],
             },
         ]
-        for x in xrange(10):
+        for x in range(10):
             commit_data = {
                 'message': 'Changed foobar - commit%s' % x,
                 'author': 'Jane Doe <jane.doe@example.com>',
@@ -107,7 +107,7 @@ def get_normalized_path(path):
         return get_normalized_path(newpath)
     else:
         start = int(m.group(1)[-5:]) + 1
-        for x in xrange(start, 10000):
+        for x in range(start, 10000):
             newname = name[:-5] + str(x).rjust(5, '0')
             newpath = os.path.join(dir, newname)
             if ext:
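The surrounding function is a filename uniquifier: it bumps a zero-padded five-digit counter embedded in the name until an unused path turns up. The padding idiom in isolation:

start = 8
assert [str(x).rjust(5, '0') for x in range(start, start + 3)] == ['00008', '00009', '00010']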