@@ -0,0 +1,14 @@
Copyright 2006 Google Inc.
http://code.google.com/p/google-diff-match-patch/

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@@ -0,0 +1,1919 @@
#!/usr/bin/python2.4

from __future__ import division

"""Diff Match and Patch

Copyright 2006 Google Inc.
http://code.google.com/p/google-diff-match-patch/

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

"""Functions for diff, match and patch.

Computes the difference between two texts to create a patch.
Applies the patch onto another text, allowing for errors.
"""

__author__ = 'fraser@google.com (Neil Fraser)'

import math
import re
import sys
import time
import urllib

class diff_match_patch:
  """Class containing the diff, match and patch methods.

  Also contains the behaviour settings.
  """

  def __init__(self):
    """Inits a diff_match_patch object with default settings.
    Redefine these in your program to override the defaults.
    """

    # Number of seconds to map a diff before giving up (0 for infinity).
    self.Diff_Timeout = 1.0
    # Cost of an empty edit operation in terms of edit characters.
    self.Diff_EditCost = 4
    # At what point is no match declared (0.0 = perfection, 1.0 = very loose).
    self.Match_Threshold = 0.5
    # How far to search for a match (0 = exact location, 1000+ = broad match).
    # A match this many characters away from the expected location will add
    # 1.0 to the score (0.0 is a perfect match).
    self.Match_Distance = 1000
    # When deleting a large block of text (over ~64 characters), how close do
    # the contents have to be to match the expected contents. (0.0 = perfection,
    # 1.0 = very loose). Note that Match_Threshold controls how closely the
    # end points of a delete need to match.
    self.Patch_DeleteThreshold = 0.5
    # Chunk size for context length.
    self.Patch_Margin = 4

    # The number of bits in an int.
    # Python has no maximum, thus to disable patch splitting set to 0.
    # However to avoid long patches in certain pathological cases, use 32.
    # Multiple short patches (using native ints) are much faster than long ones.
    self.Match_MaxBits = 32

  # DIFF FUNCTIONS

  # The data structure representing a diff is an array of tuples:
  # [(DIFF_DELETE, "Hello"), (DIFF_INSERT, "Goodbye"), (DIFF_EQUAL, " world.")]
  # which means: delete "Hello", add "Goodbye" and keep " world."
  DIFF_DELETE = -1
  DIFF_INSERT = 1
  DIFF_EQUAL = 0

  def diff_main(self, text1, text2, checklines=True, deadline=None):
    """Find the differences between two texts. Simplifies the problem by
      stripping any common prefix or suffix off the texts before diffing.

    Args:
      text1: Old string to be diffed.
      text2: New string to be diffed.
      checklines: Optional speedup flag. If present and false, then don't run
        a line-level diff first to identify the changed areas.
        Defaults to true, which does a faster, slightly less optimal diff.
      deadline: Optional time when the diff should be complete by. Used
        internally for recursive calls. Users should set Diff_Timeout instead.

    Returns:
      Array of changes.
    """
    # Set a deadline by which time the diff must be complete.
    if deadline == None:
      # Unlike in most languages, Python counts time in seconds.
      if self.Diff_Timeout <= 0:
        deadline = sys.maxint
      else:
        deadline = time.time() + self.Diff_Timeout

    # Check for null inputs.
    if text1 == None or text2 == None:
      raise ValueError("Null inputs. (diff_main)")

    # Check for equality (speedup).
    if text1 == text2:
      if text1:
        return [(self.DIFF_EQUAL, text1)]
      return []

    # Trim off common prefix (speedup).
    commonlength = self.diff_commonPrefix(text1, text2)
    commonprefix = text1[:commonlength]
    text1 = text1[commonlength:]
    text2 = text2[commonlength:]

    # Trim off common suffix (speedup).
    commonlength = self.diff_commonSuffix(text1, text2)
    if commonlength == 0:
      commonsuffix = ''
    else:
      commonsuffix = text1[-commonlength:]
      text1 = text1[:-commonlength]
      text2 = text2[:-commonlength]

    # Compute the diff on the middle block.
    diffs = self.diff_compute(text1, text2, checklines, deadline)

    # Restore the prefix and suffix.
    if commonprefix:
      diffs[:0] = [(self.DIFF_EQUAL, commonprefix)]
    if commonsuffix:
      diffs.append((self.DIFF_EQUAL, commonsuffix))
    self.diff_cleanupMerge(diffs)
    return diffs
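
  # Illustrative usage (not part of the upstream file): a minimal sketch of
  # diff_main with default settings.
  #
  #   dmp = diff_match_patch()
  #   dmp.diff_main("abc", "abd")
  #   # -> [(0, 'ab'), (-1, 'c'), (1, 'd')]
  #
  # The common prefix "ab" is trimmed before the middle block is diffed, then
  # restored as a single DIFF_EQUAL tuple.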

  def diff_compute(self, text1, text2, checklines, deadline):
    """Find the differences between two texts. Assumes that the texts do not
      have any common prefix or suffix.

    Args:
      text1: Old string to be diffed.
      text2: New string to be diffed.
      checklines: Speedup flag. If false, then don't run a line-level diff
        first to identify the changed areas.
        If true, then run a faster, slightly less optimal diff.
      deadline: Time when the diff should be complete by.

    Returns:
      Array of changes.
    """
    if not text1:
      # Just add some text (speedup).
      return [(self.DIFF_INSERT, text2)]

    if not text2:
      # Just delete some text (speedup).
      return [(self.DIFF_DELETE, text1)]

    if len(text1) > len(text2):
      (longtext, shorttext) = (text1, text2)
    else:
      (shorttext, longtext) = (text1, text2)
    i = longtext.find(shorttext)
    if i != -1:
      # Shorter text is inside the longer text (speedup).
      diffs = [(self.DIFF_INSERT, longtext[:i]), (self.DIFF_EQUAL, shorttext),
               (self.DIFF_INSERT, longtext[i + len(shorttext):])]
      # Swap insertions for deletions if diff is reversed.
      if len(text1) > len(text2):
        diffs[0] = (self.DIFF_DELETE, diffs[0][1])
        diffs[2] = (self.DIFF_DELETE, diffs[2][1])
      return diffs

    if len(shorttext) == 1:
      # Single character string.
      # After the previous speedup, the character can't be an equality.
      return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]

    # Check to see if the problem can be split in two.
    hm = self.diff_halfMatch(text1, text2)
    if hm:
      # A half-match was found, sort out the return data.
      (text1_a, text1_b, text2_a, text2_b, mid_common) = hm
      # Send both pairs off for separate processing.
      diffs_a = self.diff_main(text1_a, text2_a, checklines, deadline)
      diffs_b = self.diff_main(text1_b, text2_b, checklines, deadline)
      # Merge the results.
      return diffs_a + [(self.DIFF_EQUAL, mid_common)] + diffs_b

    if checklines and len(text1) > 100 and len(text2) > 100:
      return self.diff_lineMode(text1, text2, deadline)

    return self.diff_bisect(text1, text2, deadline)

  def diff_lineMode(self, text1, text2, deadline):
    """Do a quick line-level diff on both strings, then rediff the parts for
      greater accuracy.
      This speedup can produce non-minimal diffs.

    Args:
      text1: Old string to be diffed.
      text2: New string to be diffed.
      deadline: Time when the diff should be complete by.

    Returns:
      Array of changes.
    """

    # Scan the text on a line-by-line basis first.
    (text1, text2, linearray) = self.diff_linesToChars(text1, text2)

    diffs = self.diff_main(text1, text2, False, deadline)

    # Convert the diff back to original text.
    self.diff_charsToLines(diffs, linearray)
    # Eliminate freak matches (e.g. blank lines)
    self.diff_cleanupSemantic(diffs)

    # Rediff any replacement blocks, this time character-by-character.
    # Add a dummy entry at the end.
    diffs.append((self.DIFF_EQUAL, ''))
    pointer = 0
    count_delete = 0
    count_insert = 0
    text_delete = ''
    text_insert = ''
    while pointer < len(diffs):
      if diffs[pointer][0] == self.DIFF_INSERT:
        count_insert += 1
        text_insert += diffs[pointer][1]
      elif diffs[pointer][0] == self.DIFF_DELETE:
        count_delete += 1
        text_delete += diffs[pointer][1]
      elif diffs[pointer][0] == self.DIFF_EQUAL:
        # Upon reaching an equality, check for prior redundancies.
        if count_delete >= 1 and count_insert >= 1:
          # Delete the offending records and add the merged ones.
          a = self.diff_main(text_delete, text_insert, False, deadline)
          diffs[pointer - count_delete - count_insert : pointer] = a
          pointer = pointer - count_delete - count_insert + len(a)
        count_insert = 0
        count_delete = 0
        text_delete = ''
        text_insert = ''

      pointer += 1

    diffs.pop()  # Remove the dummy entry at the end.

    return diffs

  def diff_bisect(self, text1, text2, deadline):
    """Find the 'middle snake' of a diff, split the problem in two
      and return the recursively constructed diff.
      See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations.

    Args:
      text1: Old string to be diffed.
      text2: New string to be diffed.
      deadline: Time at which to bail if not yet complete.

    Returns:
      Array of diff tuples.
    """

    # Cache the text lengths to prevent multiple calls.
    text1_length = len(text1)
    text2_length = len(text2)
    max_d = (text1_length + text2_length + 1) // 2
    v_offset = max_d
    v_length = 2 * max_d
    v1 = [-1] * v_length
    v1[v_offset + 1] = 0
    v2 = v1[:]
    delta = text1_length - text2_length
    # If the total number of characters is odd, then the front path will
    # collide with the reverse path.
    front = (delta % 2 != 0)
    # Offsets for start and end of k loop.
    # Prevents mapping of space beyond the grid.
    k1start = 0
    k1end = 0
    k2start = 0
    k2end = 0
    for d in xrange(max_d):
      # Bail out if deadline is reached.
      if time.time() > deadline:
        break

      # Walk the front path one step.
      for k1 in xrange(-d + k1start, d + 1 - k1end, 2):
        k1_offset = v_offset + k1
        if k1 == -d or (k1 != d and
            v1[k1_offset - 1] < v1[k1_offset + 1]):
          x1 = v1[k1_offset + 1]
        else:
          x1 = v1[k1_offset - 1] + 1
        y1 = x1 - k1
        while (x1 < text1_length and y1 < text2_length and
               text1[x1] == text2[y1]):
          x1 += 1
          y1 += 1
        v1[k1_offset] = x1
        if x1 > text1_length:
          # Ran off the right of the graph.
          k1end += 2
        elif y1 > text2_length:
          # Ran off the bottom of the graph.
          k1start += 2
        elif front:
          k2_offset = v_offset + delta - k1
          if k2_offset >= 0 and k2_offset < v_length and v2[k2_offset] != -1:
            # Mirror x2 onto top-left coordinate system.
            x2 = text1_length - v2[k2_offset]
            if x1 >= x2:
              # Overlap detected.
              return self.diff_bisectSplit(text1, text2, x1, y1, deadline)

      # Walk the reverse path one step.
      for k2 in xrange(-d + k2start, d + 1 - k2end, 2):
        k2_offset = v_offset + k2
        if k2 == -d or (k2 != d and
            v2[k2_offset - 1] < v2[k2_offset + 1]):
          x2 = v2[k2_offset + 1]
        else:
          x2 = v2[k2_offset - 1] + 1
        y2 = x2 - k2
        while (x2 < text1_length and y2 < text2_length and
               text1[-x2 - 1] == text2[-y2 - 1]):
          x2 += 1
          y2 += 1
        v2[k2_offset] = x2
        if x2 > text1_length:
          # Ran off the left of the graph.
          k2end += 2
        elif y2 > text2_length:
          # Ran off the top of the graph.
          k2start += 2
        elif not front:
          k1_offset = v_offset + delta - k2
          if k1_offset >= 0 and k1_offset < v_length and v1[k1_offset] != -1:
            x1 = v1[k1_offset]
            y1 = v_offset + x1 - k1_offset
            # Mirror x2 onto top-left coordinate system.
            x2 = text1_length - x2
            if x1 >= x2:
              # Overlap detected.
              return self.diff_bisectSplit(text1, text2, x1, y1, deadline)

    # Diff took too long and hit the deadline or
    # number of diffs equals number of characters, no commonality at all.
    return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]

  def diff_bisectSplit(self, text1, text2, x, y, deadline):
    """Given the location of the 'middle snake', split the diff in two parts
    and recurse.

    Args:
      text1: Old string to be diffed.
      text2: New string to be diffed.
      x: Index of split point in text1.
      y: Index of split point in text2.
      deadline: Time at which to bail if not yet complete.

    Returns:
      Array of diff tuples.
    """
    text1a = text1[:x]
    text2a = text2[:y]
    text1b = text1[x:]
    text2b = text2[y:]

    # Compute both diffs serially.
    diffs = self.diff_main(text1a, text2a, False, deadline)
    diffsb = self.diff_main(text1b, text2b, False, deadline)

    return diffs + diffsb

  def diff_linesToChars(self, text1, text2):
    """Split two texts into an array of strings. Reduce the texts to a string
    of hashes where each Unicode character represents one line.

    Args:
      text1: First string.
      text2: Second string.

    Returns:
      Three element tuple, containing the encoded text1, the encoded text2 and
      the array of unique strings. The zeroth element of the array of unique
      strings is intentionally blank.
    """
    lineArray = []  # e.g. lineArray[4] == "Hello\n"
    lineHash = {}   # e.g. lineHash["Hello\n"] == 4

    # "\x00" is a valid character, but various debuggers don't like it.
    # So we'll insert a junk entry to avoid generating a null character.
    lineArray.append('')

    def diff_linesToCharsMunge(text):
      """Split a text into an array of strings. Reduce the texts to a string
      of hashes where each Unicode character represents one line.
      Modifies linearray and linehash through being a closure.

      Args:
        text: String to encode.

      Returns:
        Encoded string.
      """
      chars = []
      # Walk the text, pulling out a substring for each line.
      # text.split('\n') would temporarily double our memory footprint.
      # Modifying text would create many large strings to garbage collect.
      lineStart = 0
      lineEnd = -1
      while lineEnd < len(text) - 1:
        lineEnd = text.find('\n', lineStart)
        if lineEnd == -1:
          lineEnd = len(text) - 1
        line = text[lineStart:lineEnd + 1]
        lineStart = lineEnd + 1

        if line in lineHash:
          chars.append(unichr(lineHash[line]))
        else:
          lineArray.append(line)
          lineHash[line] = len(lineArray) - 1
          chars.append(unichr(len(lineArray) - 1))
      return "".join(chars)

    chars1 = diff_linesToCharsMunge(text1)
    chars2 = diff_linesToCharsMunge(text2)
    return (chars1, chars2, lineArray)
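
  # Illustrative usage (not part of the upstream file): each distinct line is
  # mapped to one Unicode code point so a line-level diff can run on short
  # strings. With dmp = diff_match_patch():
  #
  #   dmp.diff_linesToChars("a\nb\na\n", "a\nc\na\n")
  #   # -> ('\x01\x02\x01', '\x01\x03\x01', ['', 'a\n', 'b\n', 'c\n'])
  #
  # Index 0 of the line array is the deliberate junk entry, so '\x00' is
  # never emitted.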

  def diff_charsToLines(self, diffs, lineArray):
    """Rehydrate the text in a diff from a string of line hashes to real lines
    of text.

    Args:
      diffs: Array of diff tuples.
      lineArray: Array of unique strings.
    """
    for x in xrange(len(diffs)):
      text = []
      for char in diffs[x][1]:
        text.append(lineArray[ord(char)])
      diffs[x] = (diffs[x][0], "".join(text))

  def diff_commonPrefix(self, text1, text2):
    """Determine the common prefix of two strings.

    Args:
      text1: First string.
      text2: Second string.

    Returns:
      The number of characters common to the start of each string.
    """
    # Quick check for common null cases.
    if not text1 or not text2 or text1[0] != text2[0]:
      return 0
    # Binary search.
    # Performance analysis: http://neil.fraser.name/news/2007/10/09/
    pointermin = 0
    pointermax = min(len(text1), len(text2))
    pointermid = pointermax
    pointerstart = 0
    while pointermin < pointermid:
      if text1[pointerstart:pointermid] == text2[pointerstart:pointermid]:
        pointermin = pointermid
        pointerstart = pointermin
      else:
        pointermax = pointermid
      pointermid = (pointermax - pointermin) // 2 + pointermin
    return pointermid

  def diff_commonSuffix(self, text1, text2):
    """Determine the common suffix of two strings.

    Args:
      text1: First string.
      text2: Second string.

    Returns:
      The number of characters common to the end of each string.
    """
    # Quick check for common null cases.
    if not text1 or not text2 or text1[-1] != text2[-1]:
      return 0
    # Binary search.
    # Performance analysis: http://neil.fraser.name/news/2007/10/09/
    pointermin = 0
    pointermax = min(len(text1), len(text2))
    pointermid = pointermax
    pointerend = 0
    while pointermin < pointermid:
      if (text1[-pointermid:len(text1) - pointerend] ==
          text2[-pointermid:len(text2) - pointerend]):
        pointermin = pointermid
        pointerend = pointermin
      else:
        pointermax = pointermid
      pointermid = (pointermax - pointermin) // 2 + pointermin
    return pointermid

  def diff_commonOverlap(self, text1, text2):
    """Determine if the suffix of one string is the prefix of another.

    Args:
      text1: First string.
      text2: Second string.

    Returns:
      The number of characters common to the end of the first
      string and the start of the second string.
    """
    # Cache the text lengths to prevent multiple calls.
    text1_length = len(text1)
    text2_length = len(text2)
    # Eliminate the null case.
    if text1_length == 0 or text2_length == 0:
      return 0
    # Truncate the longer string.
    if text1_length > text2_length:
      text1 = text1[-text2_length:]
    elif text1_length < text2_length:
      text2 = text2[:text1_length]
    text_length = min(text1_length, text2_length)
    # Quick check for the worst case.
    if text1 == text2:
      return text_length

    # Start by looking for a single character match
    # and increase length until no match is found.
    # Performance analysis: http://neil.fraser.name/news/2010/11/04/
    best = 0
    length = 1
    while True:
      pattern = text1[-length:]
      found = text2.find(pattern)
      if found == -1:
        return best
      length += found
      if found == 0 or text1[-length:] == text2[:length]:
        best = length
        length += 1
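
  # Illustrative usage (not part of the upstream file): the overlap is
  # measured from the end of text1 into the start of text2.
  #
  #   dmp.diff_commonOverlap("abcxxx", "xxxdef")  # -> 3 ("xxx")
  #   dmp.diff_commonOverlap("abc", "xyz")        # -> 0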

  def diff_halfMatch(self, text1, text2):
    """Do the two texts share a substring which is at least half the length of
    the longer text?
    This speedup can produce non-minimal diffs.

    Args:
      text1: First string.
      text2: Second string.

    Returns:
      Five element Array, containing the prefix of text1, the suffix of text1,
      the prefix of text2, the suffix of text2 and the common middle. Or None
      if there was no match.
    """
    if self.Diff_Timeout <= 0:
      # Don't risk returning a non-optimal diff if we have unlimited time.
      return None
    if len(text1) > len(text2):
      (longtext, shorttext) = (text1, text2)
    else:
      (shorttext, longtext) = (text1, text2)
    if len(longtext) < 4 or len(shorttext) * 2 < len(longtext):
      return None  # Pointless.

    def diff_halfMatchI(longtext, shorttext, i):
      """Does a substring of shorttext exist within longtext such that the
      substring is at least half the length of longtext?
      Closure, but does not reference any external variables.

      Args:
        longtext: Longer string.
        shorttext: Shorter string.
        i: Start index of quarter length substring within longtext.

      Returns:
        Five element Array, containing the prefix of longtext, the suffix of
        longtext, the prefix of shorttext, the suffix of shorttext and the
        common middle. Or None if there was no match.
      """
      seed = longtext[i:i + len(longtext) // 4]
      best_common = ''
      j = shorttext.find(seed)
      while j != -1:
        prefixLength = self.diff_commonPrefix(longtext[i:], shorttext[j:])
        suffixLength = self.diff_commonSuffix(longtext[:i], shorttext[:j])
        if len(best_common) < suffixLength + prefixLength:
          best_common = (shorttext[j - suffixLength:j] +
                         shorttext[j:j + prefixLength])
          best_longtext_a = longtext[:i - suffixLength]
          best_longtext_b = longtext[i + prefixLength:]
          best_shorttext_a = shorttext[:j - suffixLength]
          best_shorttext_b = shorttext[j + prefixLength:]
        j = shorttext.find(seed, j + 1)

      if len(best_common) * 2 >= len(longtext):
        return (best_longtext_a, best_longtext_b,
                best_shorttext_a, best_shorttext_b, best_common)
      else:
        return None

    # First check if the second quarter is the seed for a half-match.
    hm1 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 3) // 4)
    # Check again based on the third quarter.
    hm2 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 1) // 2)
    if not hm1 and not hm2:
      return None
    elif not hm2:
      hm = hm1
    elif not hm1:
      hm = hm2
    else:
      # Both matched. Select the longest.
      if len(hm1[4]) > len(hm2[4]):
        hm = hm1
      else:
        hm = hm2

    # A half-match was found, sort out the return data.
    if len(text1) > len(text2):
      (text1_a, text1_b, text2_a, text2_b, mid_common) = hm
    else:
      (text2_a, text2_b, text1_a, text1_b, mid_common) = hm
    return (text1_a, text1_b, text2_a, text2_b, mid_common)
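
  # Illustrative usage (not part of the upstream file): with the default
  # nonzero Diff_Timeout, a shared run at least half the longer text splits
  # the problem, following the five-element contract in the docstring.
  #
  #   dmp.diff_halfMatch("1234567890", "a345678z")
  #   # -> ('12', '90', 'a', 'z', '345678')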

  def diff_cleanupSemantic(self, diffs):
    """Reduce the number of edits by eliminating semantically trivial
    equalities.

    Args:
      diffs: Array of diff tuples.
    """
    changes = False
    equalities = []  # Stack of indices where equalities are found.
    lastequality = None  # Always equal to diffs[equalities[-1]][1]
    pointer = 0  # Index of current position.
    # Number of chars that changed prior to the equality.
    length_insertions1, length_deletions1 = 0, 0
    # Number of chars that changed after the equality.
    length_insertions2, length_deletions2 = 0, 0
    while pointer < len(diffs):
      if diffs[pointer][0] == self.DIFF_EQUAL:  # Equality found.
        equalities.append(pointer)
        length_insertions1, length_insertions2 = length_insertions2, 0
        length_deletions1, length_deletions2 = length_deletions2, 0
        lastequality = diffs[pointer][1]
      else:  # An insertion or deletion.
        if diffs[pointer][0] == self.DIFF_INSERT:
          length_insertions2 += len(diffs[pointer][1])
        else:
          length_deletions2 += len(diffs[pointer][1])
        # Eliminate an equality that is smaller or equal to the edits on both
        # sides of it.
        if (lastequality and (len(lastequality) <=
            max(length_insertions1, length_deletions1)) and
            (len(lastequality) <= max(length_insertions2, length_deletions2))):
          # Duplicate record.
          diffs.insert(equalities[-1], (self.DIFF_DELETE, lastequality))
          # Change second copy to insert.
          diffs[equalities[-1] + 1] = (self.DIFF_INSERT,
                                       diffs[equalities[-1] + 1][1])
          # Throw away the equality we just deleted.
          equalities.pop()
          # Throw away the previous equality (it needs to be reevaluated).
          if len(equalities):
            equalities.pop()
          if len(equalities):
            pointer = equalities[-1]
          else:
            pointer = -1
          # Reset the counters.
          length_insertions1, length_deletions1 = 0, 0
          length_insertions2, length_deletions2 = 0, 0
          lastequality = None
          changes = True
      pointer += 1

    # Normalize the diff.
    if changes:
      self.diff_cleanupMerge(diffs)
    self.diff_cleanupSemanticLossless(diffs)

    # Find any overlaps between deletions and insertions.
    # e.g: <del>abcxxx</del><ins>xxxdef</ins>
    #   -> <del>abc</del>xxx<ins>def</ins>
    # e.g: <del>xxxabc</del><ins>defxxx</ins>
    #   -> <ins>def</ins>xxx<del>abc</del>
    # Only extract an overlap if it is as big as the edit ahead or behind it.
    pointer = 1
    while pointer < len(diffs):
      if (diffs[pointer - 1][0] == self.DIFF_DELETE and
          diffs[pointer][0] == self.DIFF_INSERT):
        deletion = diffs[pointer - 1][1]
        insertion = diffs[pointer][1]
        overlap_length1 = self.diff_commonOverlap(deletion, insertion)
        overlap_length2 = self.diff_commonOverlap(insertion, deletion)
        if overlap_length1 >= overlap_length2:
          if (overlap_length1 >= len(deletion) / 2.0 or
              overlap_length1 >= len(insertion) / 2.0):
            # Overlap found. Insert an equality and trim the surrounding edits.
            diffs.insert(pointer, (self.DIFF_EQUAL,
                                   insertion[:overlap_length1]))
            diffs[pointer - 1] = (self.DIFF_DELETE,
                                  deletion[:len(deletion) - overlap_length1])
            diffs[pointer + 1] = (self.DIFF_INSERT,
                                  insertion[overlap_length1:])
            pointer += 1
        else:
          if (overlap_length2 >= len(deletion) / 2.0 or
              overlap_length2 >= len(insertion) / 2.0):
            # Reverse overlap found.
            # Insert an equality and swap and trim the surrounding edits.
            diffs.insert(pointer, (self.DIFF_EQUAL, deletion[:overlap_length2]))
            diffs[pointer - 1] = (self.DIFF_INSERT,
                                  insertion[:len(insertion) - overlap_length2])
            diffs[pointer + 1] = (self.DIFF_DELETE, deletion[overlap_length2:])
            pointer += 1
        pointer += 1
      pointer += 1

  def diff_cleanupSemanticLossless(self, diffs):
    """Look for single edits surrounded on both sides by equalities
    which can be shifted sideways to align the edit to a word boundary.
    e.g: The c<ins>at c</ins>ame. -> The <ins>cat </ins>came.

    Args:
      diffs: Array of diff tuples.
    """

    def diff_cleanupSemanticScore(one, two):
      """Given two strings, compute a score representing whether the
      internal boundary falls on logical boundaries.
      Scores range from 6 (best) to 0 (worst).
      Closure, but does not reference any external variables.

      Args:
        one: First string.
        two: Second string.

      Returns:
        The score.
      """
      if not one or not two:
        # Edges are the best.
        return 6

      # Each port of this function behaves slightly differently due to
      # subtle differences in each language's definition of things like
      # 'whitespace'. Since this function's purpose is largely cosmetic,
      # the choice has been made to use each language's native features
      # rather than force total conformity.
      char1 = one[-1]
      char2 = two[0]
      nonAlphaNumeric1 = not char1.isalnum()
      nonAlphaNumeric2 = not char2.isalnum()
      whitespace1 = nonAlphaNumeric1 and char1.isspace()
      whitespace2 = nonAlphaNumeric2 and char2.isspace()
      lineBreak1 = whitespace1 and (char1 == "\r" or char1 == "\n")
      lineBreak2 = whitespace2 and (char2 == "\r" or char2 == "\n")
      blankLine1 = lineBreak1 and self.BLANKLINEEND.search(one)
      blankLine2 = lineBreak2 and self.BLANKLINESTART.match(two)

      if blankLine1 or blankLine2:
        # Five points for blank lines.
        return 5
      elif lineBreak1 or lineBreak2:
        # Four points for line breaks.
        return 4
      elif nonAlphaNumeric1 and not whitespace1 and whitespace2:
        # Three points for end of sentences.
        return 3
      elif whitespace1 or whitespace2:
        # Two points for whitespace.
        return 2
      elif nonAlphaNumeric1 or nonAlphaNumeric2:
        # One point for non-alphanumeric.
        return 1
      return 0

    pointer = 1
    # Intentionally ignore the first and last element (don't need checking).
    while pointer < len(diffs) - 1:
      if (diffs[pointer - 1][0] == self.DIFF_EQUAL and
          diffs[pointer + 1][0] == self.DIFF_EQUAL):
        # This is a single edit surrounded by equalities.
        equality1 = diffs[pointer - 1][1]
        edit = diffs[pointer][1]
        equality2 = diffs[pointer + 1][1]

        # First, shift the edit as far left as possible.
        commonOffset = self.diff_commonSuffix(equality1, edit)
        if commonOffset:
          commonString = edit[-commonOffset:]
          equality1 = equality1[:-commonOffset]
          edit = commonString + edit[:-commonOffset]
          equality2 = commonString + equality2

        # Second, step character by character right, looking for the best fit.
        bestEquality1 = equality1
        bestEdit = edit
        bestEquality2 = equality2
        bestScore = (diff_cleanupSemanticScore(equality1, edit) +
                     diff_cleanupSemanticScore(edit, equality2))
        while edit and equality2 and edit[0] == equality2[0]:
          equality1 += edit[0]
          edit = edit[1:] + equality2[0]
          equality2 = equality2[1:]
          score = (diff_cleanupSemanticScore(equality1, edit) +
                   diff_cleanupSemanticScore(edit, equality2))
          # The >= encourages trailing rather than leading whitespace on edits.
          if score >= bestScore:
            bestScore = score
            bestEquality1 = equality1
            bestEdit = edit
            bestEquality2 = equality2

        if diffs[pointer - 1][1] != bestEquality1:
          # We have an improvement, save it back to the diff.
          if bestEquality1:
            diffs[pointer - 1] = (diffs[pointer - 1][0], bestEquality1)
          else:
            del diffs[pointer - 1]
            pointer -= 1
          diffs[pointer] = (diffs[pointer][0], bestEdit)
          if bestEquality2:
            diffs[pointer + 1] = (diffs[pointer + 1][0], bestEquality2)
          else:
            del diffs[pointer + 1]
            pointer -= 1
      pointer += 1

  # Define some regex patterns for matching boundaries.
  BLANKLINEEND = re.compile(r"\n\r?\n$")
  BLANKLINESTART = re.compile(r"^\r?\n\r?\n")

  def diff_cleanupEfficiency(self, diffs):
    """Reduce the number of edits by eliminating operationally trivial
    equalities.

    Args:
      diffs: Array of diff tuples.
    """
    changes = False
    equalities = []  # Stack of indices where equalities are found.
    lastequality = None  # Always equal to diffs[equalities[-1]][1]
    pointer = 0  # Index of current position.
    pre_ins = False  # Is there an insertion operation before the last equality.
    pre_del = False  # Is there a deletion operation before the last equality.
    post_ins = False  # Is there an insertion operation after the last equality.
    post_del = False  # Is there a deletion operation after the last equality.
    while pointer < len(diffs):
      if diffs[pointer][0] == self.DIFF_EQUAL:  # Equality found.
        if (len(diffs[pointer][1]) < self.Diff_EditCost and
            (post_ins or post_del)):
          # Candidate found.
          equalities.append(pointer)
          pre_ins = post_ins
          pre_del = post_del
          lastequality = diffs[pointer][1]
        else:
          # Not a candidate, and can never become one.
          equalities = []
          lastequality = None

        post_ins = post_del = False
      else:  # An insertion or deletion.
        if diffs[pointer][0] == self.DIFF_DELETE:
          post_del = True
        else:
          post_ins = True

        # Five types to be split:
        # <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del>
        # <ins>A</ins>X<ins>C</ins><del>D</del>
        # <ins>A</ins><del>B</del>X<ins>C</ins>
        # <del>A</del>X<ins>C</ins><del>D</del>
        # <ins>A</ins><del>B</del>X<del>C</del>

        if lastequality and ((pre_ins and pre_del and post_ins and post_del) or
                             ((len(lastequality) < self.Diff_EditCost / 2) and
                              (pre_ins + pre_del + post_ins + post_del) == 3)):
          # Duplicate record.
          diffs.insert(equalities[-1], (self.DIFF_DELETE, lastequality))
          # Change second copy to insert.
          diffs[equalities[-1] + 1] = (self.DIFF_INSERT,
                                       diffs[equalities[-1] + 1][1])
          equalities.pop()  # Throw away the equality we just deleted.
          lastequality = None
          if pre_ins and pre_del:
            # No changes made which could affect previous entry, keep going.
            post_ins = post_del = True
            equalities = []
          else:
            if len(equalities):
              equalities.pop()  # Throw away the previous equality.
            if len(equalities):
              pointer = equalities[-1]
            else:
              pointer = -1
            post_ins = post_del = False
          changes = True
      pointer += 1

    if changes:
      self.diff_cleanupMerge(diffs)

  def diff_cleanupMerge(self, diffs):
    """Reorder and merge like edit sections. Merge equalities.
    Any edit section can move as long as it doesn't cross an equality.

    Args:
      diffs: Array of diff tuples.
    """
    diffs.append((self.DIFF_EQUAL, ''))  # Add a dummy entry at the end.
    pointer = 0
    count_delete = 0
    count_insert = 0
    text_delete = ''
    text_insert = ''
    while pointer < len(diffs):
      if diffs[pointer][0] == self.DIFF_INSERT:
        count_insert += 1
        text_insert += diffs[pointer][1]
        pointer += 1
      elif diffs[pointer][0] == self.DIFF_DELETE:
        count_delete += 1
        text_delete += diffs[pointer][1]
        pointer += 1
      elif diffs[pointer][0] == self.DIFF_EQUAL:
        # Upon reaching an equality, check for prior redundancies.
        if count_delete + count_insert > 1:
          if count_delete != 0 and count_insert != 0:
            # Factor out any common prefixes.
            commonlength = self.diff_commonPrefix(text_insert, text_delete)
            if commonlength != 0:
              x = pointer - count_delete - count_insert - 1
              if x >= 0 and diffs[x][0] == self.DIFF_EQUAL:
                diffs[x] = (diffs[x][0], diffs[x][1] +
                            text_insert[:commonlength])
              else:
                diffs.insert(0, (self.DIFF_EQUAL, text_insert[:commonlength]))
                pointer += 1
              text_insert = text_insert[commonlength:]
              text_delete = text_delete[commonlength:]
            # Factor out any common suffixes.
            commonlength = self.diff_commonSuffix(text_insert, text_delete)
            if commonlength != 0:
              diffs[pointer] = (diffs[pointer][0], text_insert[-commonlength:] +
                                diffs[pointer][1])
              text_insert = text_insert[:-commonlength]
              text_delete = text_delete[:-commonlength]
          # Delete the offending records and add the merged ones.
          if count_delete == 0:
            diffs[pointer - count_insert : pointer] = [
                (self.DIFF_INSERT, text_insert)]
          elif count_insert == 0:
            diffs[pointer - count_delete : pointer] = [
                (self.DIFF_DELETE, text_delete)]
          else:
            diffs[pointer - count_delete - count_insert : pointer] = [
                (self.DIFF_DELETE, text_delete),
                (self.DIFF_INSERT, text_insert)]
          pointer = pointer - count_delete - count_insert + 1
          if count_delete != 0:
            pointer += 1
          if count_insert != 0:
            pointer += 1
        elif pointer != 0 and diffs[pointer - 1][0] == self.DIFF_EQUAL:
          # Merge this equality with the previous one.
          diffs[pointer - 1] = (diffs[pointer - 1][0],
                                diffs[pointer - 1][1] + diffs[pointer][1])
          del diffs[pointer]
        else:
          pointer += 1

        count_insert = 0
        count_delete = 0
        text_delete = ''
        text_insert = ''

    if diffs[-1][1] == '':
      diffs.pop()  # Remove the dummy entry at the end.

    # Second pass: look for single edits surrounded on both sides by equalities
    # which can be shifted sideways to eliminate an equality.
    # e.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
    changes = False
    pointer = 1
    # Intentionally ignore the first and last element (don't need checking).
    while pointer < len(diffs) - 1:
      if (diffs[pointer - 1][0] == self.DIFF_EQUAL and
          diffs[pointer + 1][0] == self.DIFF_EQUAL):
        # This is a single edit surrounded by equalities.
        if diffs[pointer][1].endswith(diffs[pointer - 1][1]):
          # Shift the edit over the previous equality.
          diffs[pointer] = (diffs[pointer][0],
                            diffs[pointer - 1][1] +
                            diffs[pointer][1][:-len(diffs[pointer - 1][1])])
          diffs[pointer + 1] = (diffs[pointer + 1][0],
                                diffs[pointer - 1][1] + diffs[pointer + 1][1])
          del diffs[pointer - 1]
          changes = True
        elif diffs[pointer][1].startswith(diffs[pointer + 1][1]):
          # Shift the edit over the next equality.
          diffs[pointer - 1] = (diffs[pointer - 1][0],
                                diffs[pointer - 1][1] + diffs[pointer + 1][1])
          diffs[pointer] = (diffs[pointer][0],
                            diffs[pointer][1][len(diffs[pointer + 1][1]):] +
                            diffs[pointer + 1][1])
          del diffs[pointer + 1]
          changes = True
      pointer += 1

    # If shifts were made, the diff needs reordering and another shift sweep.
    if changes:
      self.diff_cleanupMerge(diffs)

  def diff_xIndex(self, diffs, loc):
    """loc is a location in text1, compute and return the equivalent location
    in text2. e.g. "The cat" vs "The big cat", 1->1, 5->8

    Args:
      diffs: Array of diff tuples.
      loc: Location within text1.

    Returns:
      Location within text2.
    """
    chars1 = 0
    chars2 = 0
    last_chars1 = 0
    last_chars2 = 0
    for x in xrange(len(diffs)):
      (op, text) = diffs[x]
      if op != self.DIFF_INSERT:  # Equality or deletion.
        chars1 += len(text)
      if op != self.DIFF_DELETE:  # Equality or insertion.
        chars2 += len(text)
      if chars1 > loc:  # Overshot the location.
        break
      last_chars1 = chars1
      last_chars2 = chars2

    if len(diffs) != x and diffs[x][0] == self.DIFF_DELETE:
      # The location was deleted.
      return last_chars2
    # Add the remaining character length.
    return last_chars2 + (loc - last_chars1)

  def diff_prettyHtml(self, diffs):
    """Convert a diff array into a pretty HTML report.

    Args:
      diffs: Array of diff tuples.

    Returns:
      HTML representation.
    """
    html = []
    for (op, data) in diffs:
      text = (data.replace("&", "&amp;").replace("<", "&lt;")
                  .replace(">", "&gt;").replace("\n", "&para;<br>"))
      if op == self.DIFF_INSERT:
        html.append("<ins style=\"background:#e6ffe6;\">%s</ins>" % text)
      elif op == self.DIFF_DELETE:
        html.append("<del style=\"background:#ffe6e6;\">%s</del>" % text)
      elif op == self.DIFF_EQUAL:
        html.append("<span>%s</span>" % text)
    return "".join(html)

  def diff_text1(self, diffs):
    """Compute and return the source text (all equalities and deletions).

    Args:
      diffs: Array of diff tuples.

    Returns:
      Source text.
    """
    text = []
    for (op, data) in diffs:
      if op != self.DIFF_INSERT:
        text.append(data)
    return "".join(text)

  def diff_text2(self, diffs):
    """Compute and return the destination text (all equalities and insertions).

    Args:
      diffs: Array of diff tuples.

    Returns:
      Destination text.
    """
    text = []
    for (op, data) in diffs:
      if op != self.DIFF_DELETE:
        text.append(data)
    return "".join(text)

  def diff_levenshtein(self, diffs):
    """Compute the Levenshtein distance; the number of inserted, deleted or
    substituted characters.

    Args:
      diffs: Array of diff tuples.

    Returns:
      Number of changes.
    """
    levenshtein = 0
    insertions = 0
    deletions = 0
    for (op, data) in diffs:
      if op == self.DIFF_INSERT:
        insertions += len(data)
      elif op == self.DIFF_DELETE:
        deletions += len(data)
      elif op == self.DIFF_EQUAL:
        # A deletion and an insertion is one substitution.
        levenshtein += max(insertions, deletions)
        insertions = 0
        deletions = 0
    levenshtein += max(insertions, deletions)
    return levenshtein
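
  # Illustrative usage (not part of the upstream file): paired deletions and
  # insertions are counted once, as substitutions.
  #
  #   diffs = [(dmp.DIFF_DELETE, "abc"), (dmp.DIFF_INSERT, "1234"),
  #            (dmp.DIFF_EQUAL, "xyz")]
  #   dmp.diff_levenshtein(diffs)  # -> 4, i.e. max(3 deleted, 4 inserted)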

  def diff_toDelta(self, diffs):
    """Crush the diff into an encoded string which describes the operations
    required to transform text1 into text2.
    E.g. =3\t-2\t+ing  -> Keep 3 chars, delete 2 chars, insert 'ing'.
    Operations are tab-separated. Inserted text is escaped using %xx notation.

    Args:
      diffs: Array of diff tuples.

    Returns:
      Delta text.
    """
    text = []
    for (op, data) in diffs:
      if op == self.DIFF_INSERT:
        # High ascii will raise UnicodeDecodeError. Use Unicode instead.
        data = data.encode("utf-8")
        text.append("+" + urllib.quote(data, "!~*'();/?:@&=+$,# "))
      elif op == self.DIFF_DELETE:
        text.append("-%d" % len(data))
      elif op == self.DIFF_EQUAL:
        text.append("=%d" % len(data))
    return "\t".join(text)
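
  # Illustrative usage (not part of the upstream file): the delta format from
  # the docstring, tab-separated with %xx-escaped inserted text.
  #
  #   diffs = [(dmp.DIFF_EQUAL, "jump"), (dmp.DIFF_DELETE, "s"),
  #            (dmp.DIFF_INSERT, "ed")]
  #   dmp.diff_toDelta(diffs)  # -> "=4\t-1\t+ed"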

  def diff_fromDelta(self, text1, delta):
    """Given the original text1, and an encoded string which describes the
    operations required to transform text1 into text2, compute the full diff.

    Args:
      text1: Source string for the diff.
      delta: Delta text.

    Returns:
      Array of diff tuples.

    Raises:
      ValueError: If invalid input.
    """
    if type(delta) == unicode:
      # Deltas should be composed of a subset of ascii chars, Unicode not
      # required. If this encode raises UnicodeEncodeError, delta is invalid.
      delta = delta.encode("ascii")
    diffs = []
    pointer = 0  # Cursor in text1
    tokens = delta.split("\t")
    for token in tokens:
      if token == "":
        # Blank tokens are ok (from a trailing \t).
        continue
      # Each token begins with a one character parameter which specifies the
      # operation of this token (delete, insert, equality).
      param = token[1:]
      if token[0] == "+":
        param = urllib.unquote(param).decode("utf-8")
        diffs.append((self.DIFF_INSERT, param))
      elif token[0] == "-" or token[0] == "=":
        try:
          n = int(param)
        except ValueError:
          raise ValueError("Invalid number in diff_fromDelta: " + param)
        if n < 0:
          raise ValueError("Negative number in diff_fromDelta: " + param)
        text = text1[pointer : pointer + n]
        pointer += n
        if token[0] == "=":
          diffs.append((self.DIFF_EQUAL, text))
        else:
          diffs.append((self.DIFF_DELETE, text))
      else:
        # Anything else is an error.
        raise ValueError("Invalid diff operation in diff_fromDelta: " +
                         token[0])
    if pointer != len(text1):
      raise ValueError(
          "Delta length (%d) does not equal source text length (%d)." %
          (pointer, len(text1)))
    return diffs
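
  # Illustrative usage (not part of the upstream file): diff_fromDelta inverts
  # diff_toDelta given the same source text; a delta whose lengths do not span
  # the whole source raises ValueError.
  #
  #   dmp.diff_fromDelta("jumps", "=4\t-1\t+ed")
  #   # -> [(0, 'jump'), (-1, 's'), (1, 'ed')]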

  # MATCH FUNCTIONS

  def match_main(self, text, pattern, loc):
    """Locate the best instance of 'pattern' in 'text' near 'loc'.

    Args:
      text: The text to search.
      pattern: The pattern to search for.
      loc: The location to search around.

    Returns:
      Best match index or -1.
    """
    # Check for null inputs.
    if text == None or pattern == None:
      raise ValueError("Null inputs. (match_main)")

    loc = max(0, min(loc, len(text)))
    if text == pattern:
      # Shortcut (potentially not guaranteed by the algorithm)
      return 0
    elif not text:
      # Nothing to match.
      return -1
    elif text[loc:loc + len(pattern)] == pattern:
      # Perfect match at the perfect spot! (Includes case of null pattern)
      return loc
    else:
      # Do a fuzzy compare.
      match = self.match_bitap(text, pattern, loc)
      return match
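
  # Illustrative usage (not part of the upstream file): an exact hit at loc
  # returns immediately; anything else falls through to the Bitap search
  # below, governed by Match_Threshold and Match_Distance.
  #
  #   dmp.match_main("abcdef", "de", 3)      # -> 3 (exact match at loc)
  #   dmp.match_main("abc abcd", "abcd", 0)  # -> 4 (nearby match wins)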

  def match_bitap(self, text, pattern, loc):
    """Locate the best instance of 'pattern' in 'text' near 'loc' using the
    Bitap algorithm.

    Args:
      text: The text to search.
      pattern: The pattern to search for.
      loc: The location to search around.

    Returns:
      Best match index or -1.
    """
    # Python doesn't have a maxint limit, so ignore this check.
    #if self.Match_MaxBits != 0 and len(pattern) > self.Match_MaxBits:
    #  raise ValueError("Pattern too long for this application.")

    # Initialise the alphabet.
    s = self.match_alphabet(pattern)

    def match_bitapScore(e, x):
      """Compute and return the score for a match with e errors and x location.
      Accesses loc and pattern through being a closure.

      Args:
        e: Number of errors in match.
        x: Location of match.

      Returns:
        Overall score for match (0.0 = good, 1.0 = bad).
      """
      accuracy = float(e) / len(pattern)
      proximity = abs(loc - x)
      if not self.Match_Distance:
        # Dodge divide by zero error.
        return proximity and 1.0 or accuracy
      return accuracy + (proximity / float(self.Match_Distance))

    # Highest score beyond which we give up.
    score_threshold = self.Match_Threshold
    # Is there a nearby exact match? (speedup)
    best_loc = text.find(pattern, loc)
    if best_loc != -1:
      score_threshold = min(match_bitapScore(0, best_loc), score_threshold)
      # What about in the other direction? (speedup)
      best_loc = text.rfind(pattern, loc + len(pattern))
      if best_loc != -1:
        score_threshold = min(match_bitapScore(0, best_loc), score_threshold)

    # Initialise the bit arrays.
    matchmask = 1 << (len(pattern) - 1)
    best_loc = -1

    bin_max = len(pattern) + len(text)
    # Empty initialization added to appease pychecker.
    last_rd = None
    for d in xrange(len(pattern)):
      # Scan for the best match; each iteration allows for one more error.
      # Run a binary search to determine how far from 'loc' we can stray at
      # this error level.
      bin_min = 0
      bin_mid = bin_max
      while bin_min < bin_mid:
        if match_bitapScore(d, loc + bin_mid) <= score_threshold:
          bin_min = bin_mid
        else:
          bin_max = bin_mid
        bin_mid = (bin_max - bin_min) // 2 + bin_min

      # Use the result from this iteration as the maximum for the next.
      bin_max = bin_mid
      start = max(1, loc - bin_mid + 1)
      finish = min(loc + bin_mid, len(text)) + len(pattern)

      rd = [0] * (finish + 2)
      rd[finish + 1] = (1 << d) - 1
      for j in xrange(finish, start - 1, -1):
        if len(text) <= j - 1:
          # Out of range.
          charMatch = 0
        else:
          charMatch = s.get(text[j - 1], 0)
        if d == 0:  # First pass: exact match.
          rd[j] = ((rd[j + 1] << 1) | 1) & charMatch
        else:  # Subsequent passes: fuzzy match.
          rd[j] = (((rd[j + 1] << 1) | 1) & charMatch) | (
              ((last_rd[j + 1] | last_rd[j]) << 1) | 1) | last_rd[j + 1]
        if rd[j] & matchmask:
          score = match_bitapScore(d, j - 1)
          # This match will almost certainly be better than any existing match.
          # But check anyway.
          if score <= score_threshold:
            # Told you so.
            score_threshold = score
            best_loc = j - 1
            if best_loc > loc:
              # When passing loc, don't exceed our current distance from loc.
              start = max(1, 2 * loc - best_loc)
            else:
              # Already passed loc, downhill from here on in.
              break
      # No hope for a (better) match at greater error levels.
      if match_bitapScore(d + 1, loc) > score_threshold:
        break
      last_rd = rd
    return best_loc
|
1353 | ||
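
For orientation, here is a minimal sketch of how the Bitap search above behaves when driven through the public match_main entry point. The return values were traced by hand against the code above under the default Match_Threshold of 0.5, so treat them as illustrative:

    dmp = diff_match_patch()
    # Exact substring at the expected location: short-circuits before Bitap.
    dmp.match_main("abcdef", "de", 3)    # -> 3
    # No exact occurrence of "defy"; the error-level loop finds "def" at
    # index 3 with one error (score 0.25 + 0.001 proximity, under the
    # 0.5 threshold).
    dmp.match_main("abcdef", "defy", 4)  # -> 3
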
|
1354 | def match_alphabet(self, pattern): | |
|
1355 | """Initialise the alphabet for the Bitap algorithm. | |
|
1356 | ||
|
1357 | Args: | |
|
1358 | pattern: The text to encode. | |
|
1359 | ||
|
1360 | Returns: | |
|
1361 | Hash of character locations. | |
|
1362 | """ | |
|
1363 | s = {} | |
|
1364 | for char in pattern: | |
|
1365 | s[char] = 0 | |
|
1366 | for i in xrange(len(pattern)): | |
|
1367 | s[pattern[i]] |= 1 << (len(pattern) - i - 1) | |
|
1368 | return s | |
|
1369 | ||
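
A quick illustration of the masks this helper builds: each character maps to a bitmask with a 1 at every offset where it occurs, counted from the end of the pattern. The values below follow directly from the shift arithmetic above:

    dmp = diff_match_patch()
    dmp.match_alphabet("abc")
    # -> {'a': 4, 'b': 2, 'c': 1}    (binary 100, 010, 001)
    dmp.match_alphabet("abcaba")
    # -> {'a': 37, 'b': 18, 'c': 8}  ('a' sits at offsets 0, 3 and 5,
    #                                 so its mask is 32 | 4 | 1 = 37)
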
|
1370 | # PATCH FUNCTIONS | |
|
1371 | ||
|
1372 | def patch_addContext(self, patch, text): | |
|
1373 | """Increase the context until it is unique, | |
|
1374 | but don't let the pattern expand beyond Match_MaxBits. | |
|
1375 | ||
|
1376 | Args: | |
|
1377 | patch: The patch to grow. | |
|
1378 | text: Source text. | |
|
1379 | """ | |
|
1380 | if len(text) == 0: | |
|
1381 | return | |
|
1382 | pattern = text[patch.start2 : patch.start2 + patch.length1] | |
|
1383 | padding = 0 | |
|
1384 | ||
|
1385 | # Look for the first and last matches of pattern in text. If two different | |
|
1386 | # matches are found, increase the pattern length. | |
|
1387 | while (text.find(pattern) != text.rfind(pattern) and (self.Match_MaxBits == | |
|
1388 | 0 or len(pattern) < self.Match_MaxBits - self.Patch_Margin - | |
|
1389 | self.Patch_Margin)): | |
|
1390 | padding += self.Patch_Margin | |
|
1391 | pattern = text[max(0, patch.start2 - padding) : | |
|
1392 | patch.start2 + patch.length1 + padding] | |
|
1393 | # Add one chunk for good luck. | |
|
1394 | padding += self.Patch_Margin | |
|
1395 | ||
|
1396 | # Add the prefix. | |
|
1397 | prefix = text[max(0, patch.start2 - padding) : patch.start2] | |
|
1398 | if prefix: | |
|
1399 | patch.diffs[:0] = [(self.DIFF_EQUAL, prefix)] | |
|
1400 | # Add the suffix. | |
|
1401 | suffix = text[patch.start2 + patch.length1 : | |
|
1402 | patch.start2 + patch.length1 + padding] | |
|
1403 | if suffix: | |
|
1404 | patch.diffs.append((self.DIFF_EQUAL, suffix)) | |
|
1405 | ||
|
1406 | # Roll back the start points. | |
|
1407 | patch.start1 -= len(prefix) | |
|
1408 | patch.start2 -= len(prefix) | |
|
1409 | # Extend lengths. | |
|
1410 | patch.length1 += len(prefix) + len(suffix) | |
|
1411 | patch.length2 += len(prefix) + len(suffix) | |
|
1412 | ||
|
1413 | def patch_make(self, a, b=None, c=None): | |
|
1414 | """Compute a list of patches to turn text1 into text2. | |
|
1415 | Use diffs if provided, otherwise compute it ourselves. | |
|
1416 | There are four ways to call this function, depending on what data is | |
|
1417 | available to the caller: | |
|
1418 | Method 1: | |
|
1419 | a = text1, b = text2 | |
|
1420 | Method 2: | |
|
1421 | a = diffs | |
|
1422 | Method 3 (optimal): | |
|
1423 | a = text1, b = diffs | |
|
1424 | Method 4 (deprecated, use method 3): | |
|
1425 | a = text1, b = text2, c = diffs | |
|
1426 | ||
|
1427 | Args: | |
|
1428 | a: text1 (methods 1,3,4) or Array of diff tuples for text1 to | |
|
1429 | text2 (method 2). | |
|
1430 | b: text2 (methods 1,4) or Array of diff tuples for text1 to | |
|
1431 | text2 (method 3) or undefined (method 2). | |
|
1432 | c: Array of diff tuples for text1 to text2 (method 4) or | |
|
1433 | undefined (methods 1,2,3). | |
|
1434 | ||
|
1435 | Returns: | |
|
1436 | Array of Patch objects. | |
|
1437 | """ | |
|
1438 | text1 = None | |
|
1439 | diffs = None | |
|
1440 | # Note that texts may arrive as 'str' or 'unicode'. | |
|
1441 | if isinstance(a, basestring) and isinstance(b, basestring) and c is None: | |
|
1442 | # Method 1: text1, text2 | |
|
1443 | # Compute diffs from text1 and text2. | |
|
1444 | text1 = a | |
|
1445 | diffs = self.diff_main(text1, b, True) | |
|
1446 | if len(diffs) > 2: | |
|
1447 | self.diff_cleanupSemantic(diffs) | |
|
1448 | self.diff_cleanupEfficiency(diffs) | |
|
1449 | elif isinstance(a, list) and b is None and c is None: | |
|
1450 | # Method 2: diffs | |
|
1451 | # Compute text1 from diffs. | |
|
1452 | diffs = a | |
|
1453 | text1 = self.diff_text1(diffs) | |
|
1454 | elif isinstance(a, basestring) and isinstance(b, list) and c is None: | |
|
1455 | # Method 3: text1, diffs | |
|
1456 | text1 = a | |
|
1457 | diffs = b | |
|
1458 | elif (isinstance(a, basestring) and isinstance(b, basestring) and | |
|
1459 | isinstance(c, list)): | |
|
1460 | # Method 4: text1, text2, diffs | |
|
1461 | # text2 is not used. | |
|
1462 | text1 = a | |
|
1463 | diffs = c | |
|
1464 | else: | |
|
1465 | raise ValueError("Unknown call format to patch_make.") | |
|
1466 | ||
|
1467 | if not diffs: | |
|
1468 | return [] # Get rid of the None case. | |
|
1469 | patches = [] | |
|
1470 | patch = patch_obj() | |
|
1471 | char_count1 = 0 # Number of characters into the text1 string. | |
|
1472 | char_count2 = 0 # Number of characters into the text2 string. | |
|
1473 | prepatch_text = text1 # Recreate the patches to determine context info. | |
|
1474 | postpatch_text = text1 | |
|
1475 | for x in xrange(len(diffs)): | |
|
1476 | (diff_type, diff_text) = diffs[x] | |
|
1477 | if len(patch.diffs) == 0 and diff_type != self.DIFF_EQUAL: | |
|
1478 | # A new patch starts here. | |
|
1479 | patch.start1 = char_count1 | |
|
1480 | patch.start2 = char_count2 | |
|
1481 | if diff_type == self.DIFF_INSERT: | |
|
1482 | # Insertion | |
|
1483 | patch.diffs.append(diffs[x]) | |
|
1484 | patch.length2 += len(diff_text) | |
|
1485 | postpatch_text = (postpatch_text[:char_count2] + diff_text + | |
|
1486 | postpatch_text[char_count2:]) | |
|
1487 | elif diff_type == self.DIFF_DELETE: | |
|
1488 | # Deletion. | |
|
1489 | patch.length1 += len(diff_text) | |
|
1490 | patch.diffs.append(diffs[x]) | |
|
1491 | postpatch_text = (postpatch_text[:char_count2] + | |
|
1492 | postpatch_text[char_count2 + len(diff_text):]) | |
|
1493 | elif (diff_type == self.DIFF_EQUAL and | |
|
1494 | len(diff_text) <= 2 * self.Patch_Margin and | |
|
1495 | len(patch.diffs) != 0 and len(diffs) != x + 1): | |
|
1496 | # Small equality inside a patch. | |
|
1497 | patch.diffs.append(diffs[x]) | |
|
1498 | patch.length1 += len(diff_text) | |
|
1499 | patch.length2 += len(diff_text) | |
|
1500 | ||
|
1501 | if (diff_type == self.DIFF_EQUAL and | |
|
1502 | len(diff_text) >= 2 * self.Patch_Margin): | |
|
1503 | # Time for a new patch. | |
|
1504 | if len(patch.diffs) != 0: | |
|
1505 | self.patch_addContext(patch, prepatch_text) | |
|
1506 | patches.append(patch) | |
|
1507 | patch = patch_obj() | |
|
1508 | # Unlike Unidiff, our patch lists have a rolling context. | |
|
1509 | # http://code.google.com/p/google-diff-match-patch/wiki/Unidiff | |
|
1510 | # Update prepatch text & pos to reflect the application of the | |
|
1511 | # just completed patch. | |
|
1512 | prepatch_text = postpatch_text | |
|
1513 | char_count1 = char_count2 | |
|
1514 | ||
|
1515 | # Update the current character count. | |
|
1516 | if diff_type != self.DIFF_INSERT: | |
|
1517 | char_count1 += len(diff_text) | |
|
1518 | if diff_type != self.DIFF_DELETE: | |
|
1519 | char_count2 += len(diff_text) | |
|
1520 | ||
|
1521 | # Pick up the leftover patch if not empty. | |
|
1522 | if len(patch.diffs) != 0: | |
|
1523 | self.patch_addContext(patch, prepatch_text) | |
|
1524 | patches.append(patch) | |
|
1525 | return patches | |
|
1526 | ||
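
In practice the four call signatures reduce to the following sketch; method 3 is preferred because it avoids recomputing a diff the caller already has:

    dmp = diff_match_patch()
    text1 = "The quick brown fox."
    text2 = "The quick red fox."
    # Method 1: from two texts.
    patches = dmp.patch_make(text1, text2)
    # Method 3: from text1 plus a precomputed diff.
    diffs = dmp.diff_main(text1, text2)
    patches = dmp.patch_make(text1, diffs)
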
|
1527 | def patch_deepCopy(self, patches): | |
|
1528 | """Given an array of patches, return another array that is identical. | |
|
1529 | ||
|
1530 | Args: | |
|
1531 | patches: Array of Patch objects. | |
|
1532 | ||
|
1533 | Returns: | |
|
1534 | Array of Patch objects. | |
|
1535 | """ | |
|
1536 | patchesCopy = [] | |
|
1537 | for patch in patches: | |
|
1538 | patchCopy = patch_obj() | |
|
1539 | # No need to deep copy the tuples since they are immutable. | |
|
1540 | patchCopy.diffs = patch.diffs[:] | |
|
1541 | patchCopy.start1 = patch.start1 | |
|
1542 | patchCopy.start2 = patch.start2 | |
|
1543 | patchCopy.length1 = patch.length1 | |
|
1544 | patchCopy.length2 = patch.length2 | |
|
1545 | patchesCopy.append(patchCopy) | |
|
1546 | return patchesCopy | |
|
1547 | ||
|
1548 | def patch_apply(self, patches, text): | |
|
1549 | """Merge a set of patches onto the text. Return a patched text, as well | |
|
1550 | as a list of true/false values indicating which patches were applied. | |
|
1551 | ||
|
1552 | Args: | |
|
1553 | patches: Array of Patch objects. | |
|
1554 | text: Old text. | |
|
1555 | ||
|
1556 | Returns: | |
|
1557 | Two element Array, containing the new text and an array of boolean values. | |
|
1558 | """ | |
|
1559 | if not patches: | |
|
1560 | return (text, []) | |
|
1561 | ||
|
1562 | # Deep copy the patches so that no changes are made to originals. | |
|
1563 | patches = self.patch_deepCopy(patches) | |
|
1564 | ||
|
1565 | nullPadding = self.patch_addPadding(patches) | |
|
1566 | text = nullPadding + text + nullPadding | |
|
1567 | self.patch_splitMax(patches) | |
|
1568 | ||
|
1569 | # delta keeps track of the offset between the expected and actual location | |
|
1570 | # of the previous patch. If there are patches expected at positions 10 and | |
|
1571 | # 20, but the first patch was found at 12, delta is 2 and the second patch | |
|
1572 | # has an effective expected position of 22. | |
|
1573 | delta = 0 | |
|
1574 | results = [] | |
|
1575 | for patch in patches: | |
|
1576 | expected_loc = patch.start2 + delta | |
|
1577 | text1 = self.diff_text1(patch.diffs) | |
|
1578 | end_loc = -1 | |
|
1579 | if len(text1) > self.Match_MaxBits: | |
|
1580 | # patch_splitMax will only provide an oversized pattern in the case of | |
|
1581 | # a monster delete. | |
|
1582 | start_loc = self.match_main(text, text1[:self.Match_MaxBits], | |
|
1583 | expected_loc) | |
|
1584 | if start_loc != -1: | |
|
1585 | end_loc = self.match_main(text, text1[-self.Match_MaxBits:], | |
|
1586 | expected_loc + len(text1) - self.Match_MaxBits) | |
|
1587 | if end_loc == -1 or start_loc >= end_loc: | |
|
1588 | # Can't find valid trailing context. Drop this patch. | |
|
1589 | start_loc = -1 | |
|
1590 | else: | |
|
1591 | start_loc = self.match_main(text, text1, expected_loc) | |
|
1592 | if start_loc == -1: | |
|
1593 | # No match found. :( | |
|
1594 | results.append(False) | |
|
1595 | # Subtract the delta for this failed patch from subsequent patches. | |
|
1596 | delta -= patch.length2 - patch.length1 | |
|
1597 | else: | |
|
1598 | # Found a match. :) | |
|
1599 | results.append(True) | |
|
1600 | delta = start_loc - expected_loc | |
|
1601 | if end_loc == -1: | |
|
1602 | text2 = text[start_loc : start_loc + len(text1)] | |
|
1603 | else: | |
|
1604 | text2 = text[start_loc : end_loc + self.Match_MaxBits] | |
|
1605 | if text1 == text2: | |
|
1606 | # Perfect match, just shove the replacement text in. | |
|
1607 | text = (text[:start_loc] + self.diff_text2(patch.diffs) + | |
|
1608 | text[start_loc + len(text1):]) | |
|
1609 | else: | |
|
1610 | # Imperfect match. | |
|
1611 | # Run a diff to get a framework of equivalent indices. | |
|
1612 | diffs = self.diff_main(text1, text2, False) | |
|
1613 | if (len(text1) > self.Match_MaxBits and | |
|
1614 | self.diff_levenshtein(diffs) / float(len(text1)) > | |
|
1615 | self.Patch_DeleteThreshold): | |
|
1616 | # The end points match, but the content is unacceptably bad. | |
|
1617 | results[-1] = False | |
|
1618 | else: | |
|
1619 | self.diff_cleanupSemanticLossless(diffs) | |
|
1620 | index1 = 0 | |
|
1621 | for (op, data) in patch.diffs: | |
|
1622 | if op != self.DIFF_EQUAL: | |
|
1623 | index2 = self.diff_xIndex(diffs, index1) | |
|
1624 | if op == self.DIFF_INSERT: # Insertion | |
|
1625 | text = text[:start_loc + index2] + data + text[start_loc + | |
|
1626 | index2:] | |
|
1627 | elif op == self.DIFF_DELETE: # Deletion | |
|
1628 | text = text[:start_loc + index2] + text[start_loc + | |
|
1629 | self.diff_xIndex(diffs, index1 + len(data)):] | |
|
1630 | if op != self.DIFF_DELETE: | |
|
1631 | index1 += len(data) | |
|
1632 | # Strip the padding off. | |
|
1633 | text = text[len(nullPadding):-len(nullPadding)] | |
|
1634 | return (text, results) | |
|
1635 | ||
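
Continuing the patch_make sketch above, applying the patches yields the rewritten text plus one boolean per patch. The texts are the assumed examples from earlier, not fixtures from this codebase:

    new_text, results = dmp.patch_apply(patches, "The quick brown fox.")
    # new_text == "The quick red fox.", results == [True]
    # Because matching is fuzzy, the same patches should still apply to a
    # copy of the text that has drifted slightly, within Match_Threshold.
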
|
1636 | def patch_addPadding(self, patches): | |
|
1637 | """Add some padding on text start and end so that edges can match | |
|
1638 | something. Intended to be called only from within patch_apply. | |
|
1639 | ||
|
1640 | Args: | |
|
1641 | patches: Array of Patch objects. | |
|
1642 | ||
|
1643 | Returns: | |
|
1644 | The padding string added to each side. | |
|
1645 | """ | |
|
1646 | paddingLength = self.Patch_Margin | |
|
1647 | nullPadding = "" | |
|
1648 | for x in xrange(1, paddingLength + 1): | |
|
1649 | nullPadding += chr(x) | |
|
1650 | ||
|
1651 | # Bump all the patches forward. | |
|
1652 | for patch in patches: | |
|
1653 | patch.start1 += paddingLength | |
|
1654 | patch.start2 += paddingLength | |
|
1655 | ||
|
1656 | # Add some padding on start of first diff. | |
|
1657 | patch = patches[0] | |
|
1658 | diffs = patch.diffs | |
|
1659 | if not diffs or diffs[0][0] != self.DIFF_EQUAL: | |
|
1660 | # Add nullPadding equality. | |
|
1661 | diffs.insert(0, (self.DIFF_EQUAL, nullPadding)) | |
|
1662 | patch.start1 -= paddingLength # Should be 0. | |
|
1663 | patch.start2 -= paddingLength # Should be 0. | |
|
1664 | patch.length1 += paddingLength | |
|
1665 | patch.length2 += paddingLength | |
|
1666 | elif paddingLength > len(diffs[0][1]): | |
|
1667 | # Grow first equality. | |
|
1668 | extraLength = paddingLength - len(diffs[0][1]) | |
|
1669 | newText = nullPadding[len(diffs[0][1]):] + diffs[0][1] | |
|
1670 | diffs[0] = (diffs[0][0], newText) | |
|
1671 | patch.start1 -= extraLength | |
|
1672 | patch.start2 -= extraLength | |
|
1673 | patch.length1 += extraLength | |
|
1674 | patch.length2 += extraLength | |
|
1675 | ||
|
1676 | # Add some padding on end of last diff. | |
|
1677 | patch = patches[-1] | |
|
1678 | diffs = patch.diffs | |
|
1679 | if not diffs or diffs[-1][0] != self.DIFF_EQUAL: | |
|
1680 | # Add nullPadding equality. | |
|
1681 | diffs.append((self.DIFF_EQUAL, nullPadding)) | |
|
1682 | patch.length1 += paddingLength | |
|
1683 | patch.length2 += paddingLength | |
|
1684 | elif paddingLength > len(diffs[-1][1]): | |
|
1685 | # Grow last equality. | |
|
1686 | extraLength = paddingLength - len(diffs[-1][1]) | |
|
1687 | newText = diffs[-1][1] + nullPadding[:extraLength] | |
|
1688 | diffs[-1] = (diffs[-1][0], newText) | |
|
1689 | patch.length1 += extraLength | |
|
1690 | patch.length2 += extraLength | |
|
1691 | ||
|
1692 | return nullPadding | |
|
1693 | ||
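
With the default Patch_Margin of 4, the padding string is simply the control characters chr(1) through chr(4). A rough sketch follows; note the method mutates the patch offsets, which is why the docstring reserves it for patch_apply:

    padding = dmp.patch_addPadding(patches)   # shifts every patch by 4
    # padding == '\x01\x02\x03\x04'
    # patch_apply then matches against padding + text + padding, so edits
    # at the very start or end of the text still have context to anchor to.
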
|
1694 | def patch_splitMax(self, patches): | |
|
1695 | """Look through the patches and break up any which are longer than the | |
|
1696 | maximum limit of the match algorithm. | |
|
1697 | Intended to be called only from within patch_apply. | |
|
1698 | ||
|
1699 | Args: | |
|
1700 | patches: Array of Patch objects. | |
|
1701 | """ | |
|
1702 | patch_size = self.Match_MaxBits | |
|
1703 | if patch_size == 0: | |
|
1704 | # Python has the option of not splitting strings due to its ability | |
|
1705 | # to handle integers of arbitrary precision. | |
|
1706 | return | |
|
1707 | for x in xrange(len(patches)): | |
|
1708 | if patches[x].length1 <= patch_size: | |
|
1709 | continue | |
|
1710 | bigpatch = patches[x] | |
|
1711 | # Remove the big old patch. | |
|
1712 | del patches[x] | |
|
1713 | x -= 1 | |
|
1714 | start1 = bigpatch.start1 | |
|
1715 | start2 = bigpatch.start2 | |
|
1716 | precontext = '' | |
|
1717 | while len(bigpatch.diffs) != 0: | |
|
1718 | # Create one of several smaller patches. | |
|
1719 | patch = patch_obj() | |
|
1720 | empty = True | |
|
1721 | patch.start1 = start1 - len(precontext) | |
|
1722 | patch.start2 = start2 - len(precontext) | |
|
1723 | if precontext: | |
|
1724 | patch.length1 = patch.length2 = len(precontext) | |
|
1725 | patch.diffs.append((self.DIFF_EQUAL, precontext)) | |
|
1726 | ||
|
1727 | while (len(bigpatch.diffs) != 0 and | |
|
1728 | patch.length1 < patch_size - self.Patch_Margin): | |
|
1729 | (diff_type, diff_text) = bigpatch.diffs[0] | |
|
1730 | if diff_type == self.DIFF_INSERT: | |
|
1731 | # Insertions are harmless. | |
|
1732 | patch.length2 += len(diff_text) | |
|
1733 | start2 += len(diff_text) | |
|
1734 | patch.diffs.append(bigpatch.diffs.pop(0)) | |
|
1735 | empty = False | |
|
1736 | elif (diff_type == self.DIFF_DELETE and len(patch.diffs) == 1 and | |
|
1737 | patch.diffs[0][0] == self.DIFF_EQUAL and | |
|
1738 | len(diff_text) > 2 * patch_size): | |
|
1739 | # This is a large deletion. Let it pass in one chunk. | |
|
1740 | patch.length1 += len(diff_text) | |
|
1741 | start1 += len(diff_text) | |
|
1742 | empty = False | |
|
1743 | patch.diffs.append((diff_type, diff_text)) | |
|
1744 | del bigpatch.diffs[0] | |
|
1745 | else: | |
|
1746 | # Deletion or equality. Only take as much as we can stomach. | |
|
1747 | diff_text = diff_text[:patch_size - patch.length1 - | |
|
1748 | self.Patch_Margin] | |
|
1749 | patch.length1 += len(diff_text) | |
|
1750 | start1 += len(diff_text) | |
|
1751 | if diff_type == self.DIFF_EQUAL: | |
|
1752 | patch.length2 += len(diff_text) | |
|
1753 | start2 += len(diff_text) | |
|
1754 | else: | |
|
1755 | empty = False | |
|
1756 | ||
|
1757 | patch.diffs.append((diff_type, diff_text)) | |
|
1758 | if diff_text == bigpatch.diffs[0][1]: | |
|
1759 | del bigpatch.diffs[0] | |
|
1760 | else: | |
|
1761 | bigpatch.diffs[0] = (bigpatch.diffs[0][0], | |
|
1762 | bigpatch.diffs[0][1][len(diff_text):]) | |
|
1763 | ||
|
1764 | # Compute the head context for the next patch. | |
|
1765 | precontext = self.diff_text2(patch.diffs) | |
|
1766 | precontext = precontext[-self.Patch_Margin:] | |
|
1767 | # Append the end context for this patch. | |
|
1768 | postcontext = self.diff_text1(bigpatch.diffs)[:self.Patch_Margin] | |
|
1769 | if postcontext: | |
|
1770 | patch.length1 += len(postcontext) | |
|
1771 | patch.length2 += len(postcontext) | |
|
1772 | if len(patch.diffs) != 0 and patch.diffs[-1][0] == self.DIFF_EQUAL: | |
|
1773 | patch.diffs[-1] = (self.DIFF_EQUAL, patch.diffs[-1][1] + | |
|
1774 | postcontext) | |
|
1775 | else: | |
|
1776 | patch.diffs.append((self.DIFF_EQUAL, postcontext)) | |
|
1777 | ||
|
1778 | if not empty: | |
|
1779 | x += 1 | |
|
1780 | patches.insert(x, patch) | |
|
1781 | ||
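
A sketch of the splitting behaviour, assuming Match_MaxBits keeps its default of 32 in this implementation. Two same-length texts with no characters in common force a single oversized patch:

    patches = dmp.patch_make("1234567890" * 5, "abcdefghij" * 5)
    # One patch with length1 == 50, exceeding Match_MaxBits.
    dmp.patch_splitMax(patches)
    # Now several patches, each (outside the large-deletion special case
    # handled above) with length1 <= dmp.Match_MaxBits.
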
|
1782 | def patch_toText(self, patches): | |
|
1783 | """Take a list of patches and return a textual representation. | |
|
1784 | ||
|
1785 | Args: | |
|
1786 | patches: Array of Patch objects. | |
|
1787 | ||
|
1788 | Returns: | |
|
1789 | Text representation of patches. | |
|
1790 | """ | |
|
1791 | text = [] | |
|
1792 | for patch in patches: | |
|
1793 | text.append(str(patch)) | |
|
1794 | return "".join(text) | |
|
1795 | ||
|
1796 | def patch_fromText(self, textline): | |
|
1797 | """Parse a textual representation of patches and return a list of patch | |
|
1798 | objects. | |
|
1799 | ||
|
1800 | Args: | |
|
1801 | textline: Text representation of patches. | |
|
1802 | ||
|
1803 | Returns: | |
|
1804 | Array of Patch objects. | |
|
1805 | ||
|
1806 | Raises: | |
|
1807 | ValueError: If invalid input. | |
|
1808 | """ | |
|
1809 | if type(textline) == unicode: | |
|
1810 | # Patches should be composed of a subset of ascii chars, Unicode not | |
|
1811 | # required. If this encode raises UnicodeEncodeError, patch is invalid. | |
|
1812 | textline = textline.encode("ascii") | |
|
1813 | patches = [] | |
|
1814 | if not textline: | |
|
1815 | return patches | |
|
1816 | text = textline.split('\n') | |
|
1817 | while len(text) != 0: | |
|
1818 | m = re.match("^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@$", text[0]) | |
|
1819 | if not m: | |
|
1820 | raise ValueError("Invalid patch string: " + text[0]) | |
|
1821 | patch = patch_obj() | |
|
1822 | patches.append(patch) | |
|
1823 | patch.start1 = int(m.group(1)) | |
|
1824 | if m.group(2) == '': | |
|
1825 | patch.start1 -= 1 | |
|
1826 | patch.length1 = 1 | |
|
1827 | elif m.group(2) == '0': | |
|
1828 | patch.length1 = 0 | |
|
1829 | else: | |
|
1830 | patch.start1 -= 1 | |
|
1831 | patch.length1 = int(m.group(2)) | |
|
1832 | ||
|
1833 | patch.start2 = int(m.group(3)) | |
|
1834 | if m.group(4) == '': | |
|
1835 | patch.start2 -= 1 | |
|
1836 | patch.length2 = 1 | |
|
1837 | elif m.group(4) == '0': | |
|
1838 | patch.length2 = 0 | |
|
1839 | else: | |
|
1840 | patch.start2 -= 1 | |
|
1841 | patch.length2 = int(m.group(4)) | |
|
1842 | ||
|
1843 | del text[0] | |
|
1844 | ||
|
1845 | while len(text) != 0: | |
|
1846 | if text[0]: | |
|
1847 | sign = text[0][0] | |
|
1848 | else: | |
|
1849 | sign = '' | |
|
1850 | line = urllib.unquote(text[0][1:]) | |
|
1851 | line = line.decode("utf-8") | |
|
1852 | if sign == '+': | |
|
1853 | # Insertion. | |
|
1854 | patch.diffs.append((self.DIFF_INSERT, line)) | |
|
1855 | elif sign == '-': | |
|
1856 | # Deletion. | |
|
1857 | patch.diffs.append((self.DIFF_DELETE, line)) | |
|
1858 | elif sign == ' ': | |
|
1859 | # Minor equality. | |
|
1860 | patch.diffs.append((self.DIFF_EQUAL, line)) | |
|
1861 | elif sign == '@': | |
|
1862 | # Start of next patch. | |
|
1863 | break | |
|
1864 | elif sign == '': | |
|
1865 | # Blank line? Whatever. | |
|
1866 | pass | |
|
1867 | else: | |
|
1868 | # Unrecognized sign. | |
|
1869 | raise ValueError("Invalid patch mode: '%s'\n%s" % (sign, line)) | |
|
1870 | del text[0] | |
|
1871 | return patches | |
|
1872 | ||
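
These two methods are inverses, so serialisation round-trips; the patch text in the comment is illustrative of the format, not an exact fixture:

    serialized = dmp.patch_toText(patches)
    # e.g. something like '@@ -7,13 +7,11 @@\n ick \n-brown\n+red\n  fox\n'
    restored = dmp.patch_fromText(serialized)
    assert dmp.patch_toText(restored) == serialized
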
|
1873 | ||
|
1874 | class patch_obj: | |
|
1875 | """Class representing one patch operation. | |
|
1876 | """ | |
|
1877 | ||
|
1878 | def __init__(self): | |
|
1879 | """Initializes with an empty list of diffs. | |
|
1880 | """ | |
|
1881 | self.diffs = [] | |
|
1882 | self.start1 = None | |
|
1883 | self.start2 = None | |
|
1884 | self.length1 = 0 | |
|
1885 | self.length2 = 0 | |
|
1886 | ||
|
1887 | def __str__(self): | |
|
1888 | """Emulate GNU diff's format. | |
|
1889 | Header: @@ -382,8 +481,9 @@ | |
|
1890 | Indices are printed as 1-based, not 0-based. | |
|
1891 | ||
|
1892 | Returns: | |
|
1893 | The GNU diff string. | |
|
1894 | """ | |
|
1895 | if self.length1 == 0: | |
|
1896 | coords1 = str(self.start1) + ",0" | |
|
1897 | elif self.length1 == 1: | |
|
1898 | coords1 = str(self.start1 + 1) | |
|
1899 | else: | |
|
1900 | coords1 = str(self.start1 + 1) + "," + str(self.length1) | |
|
1901 | if self.length2 == 0: | |
|
1902 | coords2 = str(self.start2) + ",0" | |
|
1903 | elif self.length2 == 1: | |
|
1904 | coords2 = str(self.start2 + 1) | |
|
1905 | else: | |
|
1906 | coords2 = str(self.start2 + 1) + "," + str(self.length2) | |
|
1907 | text = ["@@ -", coords1, " +", coords2, " @@\n"] | |
|
1908 | # Escape the body of the patch with %xx notation. | |
|
1909 | for (op, data) in self.diffs: | |
|
1910 | if op == diff_match_patch.DIFF_INSERT: | |
|
1911 | text.append("+") | |
|
1912 | elif op == diff_match_patch.DIFF_DELETE: | |
|
1913 | text.append("-") | |
|
1914 | elif op == diff_match_patch.DIFF_EQUAL: | |
|
1915 | text.append(" ") | |
|
1916 | # High ascii will raise UnicodeDecodeError. Use Unicode instead. | |
|
1917 | data = data.encode("utf-8") | |
|
1918 | text.append(urllib.quote(data, "!~*'();/?:@&=+$,# ") + "\n") | |
|
1919 | return "".join(text) | |
\ No newline at end of file
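
A hand-built patch makes the header rules above concrete (real patch_obj instances come from patch_make; this constructed one is purely illustrative). A range of length one omits the ",1" and prints only the 1-based start:

    p = patch_obj()
    p.start1 = p.start2 = 3
    p.length1 = p.length2 = 1
    p.diffs = [(diff_match_patch.DIFF_EQUAL, u"d")]
    str(p)   # -> '@@ -4 +4 @@\n d\n'
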
@@ -0,0 +1,398 b'' | |||
|
1 | <%def name="diff_line_anchor(filename, line, type)"><% | |
|
2 | return '%s_%s_%i' % (h.safeid(filename), type, line) | |
|
3 | %></%def> | |
|
4 | ||
|
5 | <%def name="action_class(action)"><% | |
|
6 | return { | |
|
7 | '-': 'cb-deletion', | |
|
8 | '+': 'cb-addition', | |
|
9 | ' ': 'cb-context', | |
|
10 | }.get(action, 'cb-empty') | |
|
11 | %></%def> | |
|
12 | ||
|
13 | <%def name="op_class(op_id)"><% | |
|
14 | return { | |
|
15 | DEL_FILENODE: 'deletion', # file deleted | |
|
16 | BIN_FILENODE: 'warning' # binary diff hidden | |
|
17 | }.get(op_id, 'addition') | |
|
18 | %></%def> | |
|
19 | ||
|
20 | <%def name="link_for(**kw)"><% | |
|
21 | new_args = request.GET.mixed() | |
|
22 | new_args.update(kw) | |
|
23 | return h.url('', **new_args) | |
|
24 | %></%def> | |
|
25 | ||
|
26 | <%def name="render_diffset(diffset, | |
|
27 | ||
|
28 | # collapse all file diff entries when there are more than this amount of files in the diff | |
|
29 | collapse_when_files_over=20, | |
|
30 | ||
|
31 | # collapse lines in the diff when more than this amount of lines changed in the file diff | |
|
32 | lines_changed_limit=500, | |
|
33 | )"> | |
|
34 | <% | |
|
35 | # TODO: dan: move this to an argument - and set a cookie so that it is saved | |
|
36 | # default option for future requests | |
|
37 | diff_mode = request.GET.get('diffmode', 'sideside') | |
|
38 | if diff_mode not in ('sideside', 'unified'): | |
|
39 | diff_mode = 'sideside' | |
|
40 | ||
|
41 | collapse_all = len(diffset.files) > collapse_when_files_over | |
|
42 | %> | |
|
43 | ||
|
44 | %if diff_mode == 'sideside': | |
|
45 | <style> | |
|
46 | .wrapper { | |
|
47 | max-width: 1600px !important; | |
|
48 | } | |
|
49 | </style> | |
|
50 | %endif | |
|
51 | ||
|
52 | % if diffset.limited_diff: | |
|
53 | <div class="alert alert-warning"> | |
|
54 | ${_('The requested commit is too big and content was truncated.')} <a href="${link_for(fulldiff=1)}" onclick="return confirm('${_("Showing a big diff might take some time and resources, continue?")}')">${_('Show full diff')}</a> | |
|
55 | </div> | |
|
56 | % endif | |
|
57 | ||
|
58 | <div class="cs_files"> | |
|
59 | <div class="cs_files_title"> | |
|
60 | %if diffset.files: | |
|
61 | <div class="pull-right"> | |
|
62 | <div class="btn-group"> | |
|
63 | <a | |
|
64 | class="btn ${diff_mode == 'sideside' and 'btn-primary'} tooltip" | |
|
65 | title="${_('View side by side')}" | |
|
66 | href="${link_for(diffmode='sideside')}"> | |
|
67 | <span>${_('Side by Side')}</span> | |
|
68 | </a> | |
|
69 | <a | |
|
70 | class="btn ${diff_mode == 'unified' and 'btn-primary'} tooltip" | |
|
71 | title="${_('View unified')}" href="${link_for(diffmode='unified')}"> | |
|
72 | <span>${_('Unified')}</span> | |
|
73 | </a> | |
|
74 | </div> | |
|
75 | </div> | |
|
76 | <div class="pull-left"> | |
|
77 | <div class="btn-group"> | |
|
78 | <a | |
|
79 | class="btn" | |
|
80 | href="#" | |
|
81 | onclick="$('input[class=diff-collapse-state]').prop('checked', false); return false">${_('Expand All')}</a> | |
|
82 | <a | |
|
83 | class="btn" | |
|
84 | href="#" | |
|
85 | onclick="$('input[class=diff-collapse-state]').prop('checked', true); return false">${_('Collapse All')}</a> | |
|
86 | </div> | |
|
87 | </div> | |
|
88 | %endif | |
|
89 | <h2 style="padding: 5px; text-align: center;"> | |
|
90 | %if diffset.limited_diff: | |
|
91 | ${ungettext('%(num)s file changed', '%(num)s files changed', diffset.changed_files) % {'num': diffset.changed_files}} | |
|
92 | %else: | |
|
93 | ${ungettext('%(num)s file changed: %(linesadd)s inserted, ''%(linesdel)s deleted', | |
|
94 | '%(num)s files changed: %(linesadd)s inserted, %(linesdel)s deleted', diffset.changed_files) % {'num': diffset.changed_files, 'linesadd': diffset.lines_added, 'linesdel': diffset.lines_deleted}} | |
|
95 | %endif | |
|
96 | </h2> | |
|
97 | </div> | |
|
98 | ||
|
99 | %if not diffset.files: | |
|
100 | <p class="empty_data">${_('No files')}</p> | |
|
101 | %endif | |
|
102 | ||
|
103 | <div class="filediffs"> | |
|
104 | %for i, filediff in enumerate(diffset.files): | |
|
105 | <% | |
|
106 | lines_changed = filediff['patch']['stats']['added'] + filediff['patch']['stats']['deleted'] | |
|
107 | over_lines_changed_limit = lines_changed > lines_changed_limit | |
|
108 | %> | |
|
109 | <input ${collapse_all and 'checked' or ''} class="diff-collapse-state" id="diff-collapse-${i}" type="checkbox"> | |
|
110 | <div | |
|
111 | class="diff" | |
|
112 | data-f-path="${filediff['patch']['filename']}" | |
|
113 | id="a_${h.FID('', filediff['patch']['filename'])}"> | |
|
114 | <label for="diff-collapse-${i}" class="diff-heading"> | |
|
115 | <div class="diff-collapse-indicator"></div> | |
|
116 | ${diff_ops(filediff)} | |
|
117 | </label> | |
|
118 | ${diff_menu(filediff)} | |
|
119 | <table class="cb cb-diff-${diff_mode} code-highlight ${over_lines_changed_limit and 'cb-collapsed' or ''}"> | |
|
120 | %if not filediff.hunks: | |
|
121 | %for op_id, op_text in filediff['patch']['stats']['ops'].items(): | |
|
122 | <tr> | |
|
123 | <td class="cb-text cb-${op_class(op_id)}" ${diff_mode == 'unified' and 'colspan=3' or 'colspan=4'}> | |
|
124 | %if op_id == DEL_FILENODE: | |
|
125 | ${_('File was deleted')} | |
|
126 | %elif op_id == BIN_FILENODE: | |
|
127 | ${_('Binary file hidden')} | |
|
128 | %else: | |
|
129 | ${op_text} | |
|
130 | %endif | |
|
131 | </td> | |
|
132 | </tr> | |
|
133 | %endfor | |
|
134 | %endif | |
|
135 | %if over_lines_changed_limit: | |
|
136 | <tr class="cb-warning cb-collapser"> | |
|
137 | <td class="cb-text" ${diff_mode == 'unified' and 'colspan=3' or 'colspan=4'}> | |
|
138 | ${_('This diff has been collapsed as it changes many lines, (%i lines changed)' % lines_changed)} | |
|
139 | <a href="#" class="cb-expand" | |
|
140 | onclick="$(this).closest('table').removeClass('cb-collapsed'); return false;">${_('Show them')} | |
|
141 | </a> | |
|
142 | <a href="#" class="cb-collapse" | |
|
143 | onclick="$(this).closest('table').addClass('cb-collapsed'); return false;">${_('Hide them')} | |
|
144 | </a> | |
|
145 | </td> | |
|
146 | </tr> | |
|
147 | %endif | |
|
148 | %if filediff.patch['is_limited_diff']: | |
|
149 | <tr class="cb-warning cb-collapser"> | |
|
150 | <td class="cb-text" ${diff_mode == 'unified' and 'colspan=3' or 'colspan=4'}> | |
|
151 | ${_('The requested commit is too big and content was truncated.')} <a href="${link_for(fulldiff=1)}" onclick="return confirm('${_("Showing a big diff might take some time and resources, continue?")}')">${_('Show full diff')}</a> | |
|
152 | </td> | |
|
153 | </tr> | |
|
154 | %endif | |
|
155 | %for hunk in filediff.hunks: | |
|
156 | <tr class="cb-hunk"> | |
|
157 | <td ${diff_mode == 'unified' and 'colspan=2' or ''}> | |
|
158 | ## TODO: dan: add ajax loading of more context here | |
|
159 | ## <a href="#"> | |
|
160 | <i class="icon-more"></i> | |
|
161 | ## </a> | |
|
162 | </td> | |
|
163 | <td ${diff_mode == 'sideside' and 'colspan=3' or ''}> | |
|
164 | @@ | |
|
165 | -${hunk.source_start},${hunk.source_length} | |
|
166 | +${hunk.target_start},${hunk.target_length} | |
|
167 | ${hunk.section_header} | |
|
168 | </td> | |
|
169 | </tr> | |
|
170 | %if diff_mode == 'unified': | |
|
171 | ${render_hunk_lines_unified(hunk)} | |
|
172 | %elif diff_mode == 'sideside': | |
|
173 | ${render_hunk_lines_sideside(hunk)} | |
|
174 | %else: | |
|
175 | <tr class="cb-line"> | |
|
176 | <td>unknown diff mode</td> | |
|
177 | </tr> | |
|
178 | %endif | |
|
179 | %endfor | |
|
180 | </table> | |
|
181 | </div> | |
|
182 | %endfor | |
|
183 | </div> | |
|
184 | </div> | |
|
185 | </%def> | |
|
186 | ||
|
187 | <%def name="diff_ops(filediff)"> | |
|
188 | <% | |
|
189 | stats = filediff['patch']['stats'] | |
|
190 | from rhodecode.lib.diffs import NEW_FILENODE, DEL_FILENODE, \ | |
|
191 | MOD_FILENODE, RENAMED_FILENODE, CHMOD_FILENODE, BIN_FILENODE | |
|
192 | %> | |
|
193 | <span class="diff-pill"> | |
|
194 | %if filediff.source_file_path and filediff.target_file_path: | |
|
195 | %if filediff.source_file_path != filediff.target_file_path: # file was renamed | |
|
196 | <strong>${filediff.target_file_path}</strong> ⬅ <del>${filediff.source_file_path}</del> | |
|
197 | %else: | |
|
198 | ## file was modified | |
|
199 | <strong>${filediff.source_file_path}</strong> | |
|
200 | %endif | |
|
201 | %else: | |
|
202 | %if filediff.source_file_path: | |
|
203 | ## file was deleted | |
|
204 | <strong>${filediff.source_file_path}</strong> | |
|
205 | %else: | |
|
206 | ## file was added | |
|
207 | <strong>${filediff.target_file_path}</strong> | |
|
208 | %endif | |
|
209 | %endif | |
|
210 | </span> | |
|
211 | <span class="diff-pill-group" style="float: left"> | |
|
212 | %if filediff.patch['is_limited_diff']: | |
|
213 | <span class="diff-pill tooltip" op="limited" title="The stats for this diff are not complete">limited diff</span> | |
|
214 | %endif | |
|
215 | %if RENAMED_FILENODE in stats['ops']: | |
|
216 | <span class="diff-pill" op="renamed">renamed</span> | |
|
217 | %endif | |
|
218 | ||
|
219 | %if NEW_FILENODE in stats['ops']: | |
|
220 | <span class="diff-pill" op="created">created</span> | |
|
221 | %if filediff['target_mode'].startswith('120'): | |
|
222 | <span class="diff-pill" op="symlink">symlink</span> | |
|
223 | %else: | |
|
224 | <span class="diff-pill" op="mode">${nice_mode(filediff['target_mode'])}</span> | |
|
225 | %endif | |
|
226 | %endif | |
|
227 | ||
|
228 | %if DEL_FILENODE in stats['ops']: | |
|
229 | <span class="diff-pill" op="removed">removed</span> | |
|
230 | %endif | |
|
231 | ||
|
232 | %if CHMOD_FILENODE in stats['ops']: | |
|
233 | <span class="diff-pill" op="mode"> | |
|
234 | ${nice_mode(filediff['source_mode'])} ➡ ${nice_mode(filediff['target_mode'])} | |
|
235 | </span> | |
|
236 | %endif | |
|
237 | </span> | |
|
238 | ||
|
239 | <a class="diff-pill diff-anchor" href="#a_${h.FID('', filediff.patch['filename'])}">¶</a> | |
|
240 | ||
|
241 | <span class="diff-pill-group" style="float: right"> | |
|
242 | %if BIN_FILENODE in stats['ops']: | |
|
243 | <span class="diff-pill" op="binary">binary</span> | |
|
244 | %if MOD_FILENODE in stats['ops']: | |
|
245 | <span class="diff-pill" op="modified">modified</span> | |
|
246 | %endif | |
|
247 | %endif | |
|
248 | %if stats['deleted']: | |
|
249 | <span class="diff-pill" op="deleted">-${stats['deleted']}</span> | |
|
250 | %endif | |
|
251 | %if stats['added']: | |
|
252 | <span class="diff-pill" op="added">+${stats['added']}</span> | |
|
253 | %endif | |
|
254 | </span> | |
|
255 | ||
|
256 | </%def> | |
|
257 | ||
|
258 | <%def name="nice_mode(filemode)"> | |
|
259 | ${filemode.startswith('100') and filemode[3:] or filemode} | |
|
260 | </%def> | |
|
261 | ||
|
262 | <%def name="diff_menu(filediff)"> | |
|
263 | <div class="diff-menu"> | |
|
264 | %if filediff.diffset.source_ref: | |
|
265 | %if filediff.patch['operation'] in ['D', 'M']: | |
|
266 | <a | |
|
267 | class="tooltip" | |
|
268 | href="${h.url('files_home',repo_name=c.repo_name,f_path=filediff.source_file_path,revision=filediff.diffset.source_ref)}" | |
|
269 | title="${h.tooltip(_('Show file at commit: %(commit_id)s') % {'commit_id': filediff.diffset.source_ref[:12]})}" | |
|
270 | > | |
|
271 | ${_('Show file before')} | |
|
272 | </a> | |
|
273 | %else: | |
|
274 | <a | |
|
275 | disabled | |
|
276 | class="tooltip" | |
|
277 | title="${h.tooltip(_('File no longer present at commit: %(commit_id)s') % {'commit_id': filediff.diffset.source_ref[:12]})}" | |
|
278 | > | |
|
279 | ${_('Show file before')} | |
|
280 | </a> | |
|
281 | %endif | |
|
282 | %if filediff.patch['operation'] in ['A', 'M']: | |
|
283 | <a | |
|
284 | class="tooltip" | |
|
285 | href="${h.url('files_home',repo_name=c.repo_name,f_path=filediff.target_file_path,revision=filediff.diffset.target_ref)}" | |
|
286 | title="${h.tooltip(_('Show file at commit: %(commit_id)s') % {'commit_id': filediff.diffset.target_ref[:12]})}" | |
|
287 | > | |
|
288 | ${_('Show file after')} | |
|
289 | </a> | |
|
290 | %else: | |
|
291 | <a | |
|
292 | disabled | |
|
293 | class="tooltip" | |
|
294 | title="${h.tooltip(_('File no longer present at commit: %(commit_id)s') % {'commit_id': filediff.diffset.target_ref[:12]})}" | |
|
295 | > | |
|
296 | ${_('Show file after')} | |
|
297 | </a> | |
|
298 | %endif | |
|
299 | <a | |
|
300 | class="tooltip" | |
|
301 | title="${h.tooltip(_('Raw diff'))}" | |
|
302 | href="${h.url('files_diff_home',repo_name=c.repo_name,f_path=filediff.target_file_path,diff2=filediff.diffset.target_ref,diff1=filediff.diffset.source_ref,diff='raw')}" | |
|
303 | > | |
|
304 | ${_('Raw diff')} | |
|
305 | </a> | |
|
306 | <a | |
|
307 | class="tooltip" | |
|
308 | title="${h.tooltip(_('Download diff'))}" | |
|
309 | href="${h.url('files_diff_home',repo_name=c.repo_name,f_path=filediff.target_file_path,diff2=filediff.diffset.target_ref,diff1=filediff.diffset.source_ref,diff='download')}" | |
|
310 | > | |
|
311 | ${_('Download diff')} | |
|
312 | </a> | |
|
313 | %endif | |
|
314 | </div> | |
|
315 | </%def> | |
|
316 | ||
|
317 | ||
|
318 | <%def name="render_hunk_lines_sideside(hunk)"> | |
|
319 | %for i, line in enumerate(hunk.sideside): | |
|
320 | <% | |
|
321 | old_line_anchor, new_line_anchor = None, None | |
|
322 | if line.original.lineno: | |
|
323 | old_line_anchor = diff_line_anchor(hunk.filediff.source_file_path, line.original.lineno, 'o') | |
|
324 | if line.modified.lineno: | |
|
325 | new_line_anchor = diff_line_anchor(hunk.filediff.target_file_path, line.modified.lineno, 'n') | |
|
326 | %> | |
|
327 | <tr class="cb-line"> | |
|
328 | <td class="cb-lineno ${action_class(line.original.action)}" | |
|
329 | data-line-number="${line.original.lineno}" | |
|
330 | %if old_line_anchor: | |
|
331 | id="${old_line_anchor}" | |
|
332 | %endif | |
|
333 | > | |
|
334 | %if line.original.lineno: | |
|
335 | <a name="${old_line_anchor}" href="#${old_line_anchor}">${line.original.lineno}</a> | |
|
336 | %endif | |
|
337 | </td> | |
|
338 | <td class="cb-content ${action_class(line.original.action)}" | |
|
339 | data-line-number="o${line.original.lineno}" | |
|
340 | ><span class="cb-code">${line.original.action} ${line.original.content or '' | n}</span> | |
|
341 | </td> | |
|
342 | <td class="cb-lineno ${action_class(line.modified.action)}" | |
|
343 | data-line-number="${line.modified.lineno}" | |
|
344 | %if new_line_anchor: | |
|
345 | id="${new_line_anchor}" | |
|
346 | %endif | |
|
347 | > | |
|
348 | %if line.modified.lineno: | |
|
349 | <a name="${new_line_anchor}" href="#${new_line_anchor}">${line.modified.lineno}</a> | |
|
350 | %endif | |
|
351 | </td> | |
|
352 | <td class="cb-content ${action_class(line.modified.action)}" | |
|
353 | data-line-number="n${line.modified.lineno}" | |
|
354 | > | |
|
355 | <span class="cb-code">${line.modified.action} ${line.modified.content or '' | n}</span> | |
|
356 | </td> | |
|
357 | </tr> | |
|
358 | %endfor | |
|
359 | </%def> | |
|
360 | ||
|
361 | ||
|
362 | <%def name="render_hunk_lines_unified(hunk)"> | |
|
363 | %for old_line_no, new_line_no, action, content in hunk.unified: | |
|
364 | <% | |
|
365 | old_line_anchor, new_line_anchor = None, None | |
|
366 | if old_line_no: | |
|
367 | old_line_anchor = diff_line_anchor(hunk.filediff.source_file_path, old_line_no, 'o') | |
|
368 | if new_line_no: | |
|
369 | new_line_anchor = diff_line_anchor(hunk.filediff.target_file_path, new_line_no, 'n') | |
|
370 | %> | |
|
371 | <tr class="cb-line"> | |
|
372 | <td class="cb-lineno ${action_class(action)}" | |
|
373 | data-line-number="${old_line_no}" | |
|
374 | %if old_line_anchor: | |
|
375 | id="${old_line_anchor}" | |
|
376 | %endif | |
|
377 | > | |
|
378 | %if old_line_anchor: | |
|
379 | <a name="${old_line_anchor}" href="#${old_line_anchor}">${old_line_no}</a> | |
|
380 | %endif | |
|
381 | </td> | |
|
382 | <td class="cb-lineno ${action_class(action)}" | |
|
383 | data-line-number="${new_line_no}" | |
|
384 | %if new_line_anchor: | |
|
385 | id="${new_line_anchor}" | |
|
386 | %endif | |
|
387 | > | |
|
388 | %if new_line_anchor: | |
|
389 | <a name="${new_line_anchor}" href="#${new_line_anchor}">${new_line_no}</a> | |
|
390 | %endif | |
|
391 | </td> | |
|
392 | <td class="cb-content ${action_class(action)}" | |
|
393 | data-line-number="${new_line_no and 'n' or 'o'}${new_line_no or old_line_no}" | |
|
394 | ><span class="cb-code">${action} ${content or '' | n}</span> | |
|
395 | </td> | |
|
396 | </tr> | |
|
397 | %endfor | |
|
398 | </%def> |
@@ -1,694 +1,696 b'' | |||
|
1 | 1 | This program is free software: you can redistribute it and/or modify |
|
2 | 2 | it under the terms of the GNU Affero General Public License, version 3 |
|
3 | 3 | (only), as published by the Free Software Foundation. |
|
4 | 4 | |
|
5 | 5 | |
|
6 | 6 | This program incorporates work covered by the following copyright and |
|
7 | 7 | permission notice: |
|
8 | 8 | |
|
9 | 9 | Copyright (c) 2014-2016 - packaging |
|
10 | 10 | file: |
|
11 | 11 | Copyright (c) 2008-2011 - msgpack-python |
|
12 | 12 | file:licenses/msgpack_license.txt |
|
13 | 13 | Copyright (c) 2009 - tornado |
|
14 | 14 | file:licenses/tornado_license.txt |
|
15 | 15 | Copyright (c) 2015 - pygments-markdown-lexer |
|
16 | 16 | file:licenses/pygments_markdown_lexer_license.txt |
|
17 | Copyright 2006 - diff_match_patch | |
|
18 | file:licenses/diff_match_patch_license.txt | |
|
17 | 19 | |
|
18 | 20 | All licensed under the Apache License, Version 2.0 (the "License"); |
|
19 | 21 | you may not use this file except in compliance with the License. |
|
20 | 22 | You may obtain a copy of the License at |
|
21 | 23 | |
|
22 | 24 | http://www.apache.org/licenses/LICENSE-2.0 |
|
23 | 25 | |
|
24 | 26 | Unless required by applicable law or agreed to in writing, software |
|
25 | 27 | distributed under the License is distributed on an "AS IS" BASIS, |
|
26 | 28 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
|
27 | 29 | See the License for the specific language governing permissions and |
|
28 | 30 | limitations under the License. |
|
29 | 31 | |
|
30 | 32 | |
|
31 | 33 | Below is the full text of GNU Affero General Public License, version 3 |
|
32 | 34 | |
|
33 | 35 | |
|
34 | 36 | GNU AFFERO GENERAL PUBLIC LICENSE |
|
35 | 37 | Version 3, 19 November 2007 |
|
36 | 38 | |
|
37 | 39 | Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> |
|
38 | 40 | Everyone is permitted to copy and distribute verbatim copies |
|
39 | 41 | of this license document, but changing it is not allowed. |
|
40 | 42 | |
|
41 | 43 | Preamble |
|
42 | 44 | |
|
43 | 45 | The GNU Affero General Public License is a free, copyleft license for |
|
44 | 46 | software and other kinds of works, specifically designed to ensure |
|
45 | 47 | cooperation with the community in the case of network server software. |
|
46 | 48 | |
|
47 | 49 | The licenses for most software and other practical works are designed |
|
48 | 50 | to take away your freedom to share and change the works. By contrast, |
|
49 | 51 | our General Public Licenses are intended to guarantee your freedom to |
|
50 | 52 | share and change all versions of a program--to make sure it remains free |
|
51 | 53 | software for all its users. |
|
52 | 54 | |
|
53 | 55 | When we speak of free software, we are referring to freedom, not |
|
54 | 56 | price. Our General Public Licenses are designed to make sure that you |
|
55 | 57 | have the freedom to distribute copies of free software (and charge for |
|
56 | 58 | them if you wish), that you receive source code or can get it if you |
|
57 | 59 | want it, that you can change the software or use pieces of it in new |
|
58 | 60 | free programs, and that you know you can do these things. |
|
59 | 61 | |
|
60 | 62 | Developers that use our General Public Licenses protect your rights |
|
61 | 63 | with two steps: (1) assert copyright on the software, and (2) offer |
|
62 | 64 | you this License which gives you legal permission to copy, distribute |
|
63 | 65 | and/or modify the software. |
|
64 | 66 | |
|
65 | 67 | A secondary benefit of defending all users' freedom is that |
|
66 | 68 | improvements made in alternate versions of the program, if they |
|
67 | 69 | receive widespread use, become available for other developers to |
|
68 | 70 | incorporate. Many developers of free software are heartened and |
|
69 | 71 | encouraged by the resulting cooperation. However, in the case of |
|
70 | 72 | software used on network servers, this result may fail to come about. |
|
71 | 73 | The GNU General Public License permits making a modified version and |
|
72 | 74 | letting the public access it on a server without ever releasing its |
|
73 | 75 | source code to the public. |
|
74 | 76 | |
|
75 | 77 | The GNU Affero General Public License is designed specifically to |
|
76 | 78 | ensure that, in such cases, the modified source code becomes available |
|
77 | 79 | to the community. It requires the operator of a network server to |
|
78 | 80 | provide the source code of the modified version running there to the |
|
79 | 81 | users of that server. Therefore, public use of a modified version, on |
|
80 | 82 | a publicly accessible server, gives the public access to the source |
|
81 | 83 | code of the modified version. |
|
82 | 84 | |
|
83 | 85 | An older license, called the Affero General Public License and |
|
84 | 86 | published by Affero, was designed to accomplish similar goals. This is |
|
85 | 87 | a different license, not a version of the Affero GPL, but Affero has |
|
86 | 88 | released a new version of the Affero GPL which permits relicensing under |
|
87 | 89 | this license. |
|
88 | 90 | |
|
89 | 91 | The precise terms and conditions for copying, distribution and |
|
90 | 92 | modification follow. |
|
91 | 93 | |
|
92 | 94 | TERMS AND CONDITIONS |
|
93 | 95 | |
|
94 | 96 | 0. Definitions. |
|
95 | 97 | |
|
96 | 98 | "This License" refers to version 3 of the GNU Affero General Public License. |
|
97 | 99 | |
|
98 | 100 | "Copyright" also means copyright-like laws that apply to other kinds of |
|
99 | 101 | works, such as semiconductor masks. |
|
100 | 102 | |
|
101 | 103 | "The Program" refers to any copyrightable work licensed under this |
|
102 | 104 | License. Each licensee is addressed as "you". "Licensees" and |
|
103 | 105 | "recipients" may be individuals or organizations. |
|
104 | 106 | |
|
105 | 107 | To "modify" a work means to copy from or adapt all or part of the work |
|
106 | 108 | in a fashion requiring copyright permission, other than the making of an |
|
107 | 109 | exact copy. The resulting work is called a "modified version" of the |
|
108 | 110 | earlier work or a work "based on" the earlier work. |
|
109 | 111 | |
|
110 | 112 | A "covered work" means either the unmodified Program or a work based |
|
111 | 113 | on the Program. |
|
112 | 114 | |
|
113 | 115 | To "propagate" a work means to do anything with it that, without |
|
114 | 116 | permission, would make you directly or secondarily liable for |
|
115 | 117 | infringement under applicable copyright law, except executing it on a |
|
116 | 118 | computer or modifying a private copy. Propagation includes copying, |
|
117 | 119 | distribution (with or without modification), making available to the |
|
118 | 120 | public, and in some countries other activities as well. |
|
119 | 121 | |
|
120 | 122 | To "convey" a work means any kind of propagation that enables other |
|
121 | 123 | parties to make or receive copies. Mere interaction with a user through |
|
122 | 124 | a computer network, with no transfer of a copy, is not conveying. |
|
123 | 125 | |
|
124 | 126 | An interactive user interface displays "Appropriate Legal Notices" |
|
125 | 127 | to the extent that it includes a convenient and prominently visible |
|
126 | 128 | feature that (1) displays an appropriate copyright notice, and (2) |
|
127 | 129 | tells the user that there is no warranty for the work (except to the |
|
128 | 130 | extent that warranties are provided), that licensees may convey the |
|
129 | 131 | work under this License, and how to view a copy of this License. If |
|
130 | 132 | the interface presents a list of user commands or options, such as a |
|
131 | 133 | menu, a prominent item in the list meets this criterion. |
|
132 | 134 | |
|
133 | 135 | 1. Source Code. |
|
134 | 136 | |
|
135 | 137 | The "source code" for a work means the preferred form of the work |
|
136 | 138 | for making modifications to it. "Object code" means any non-source |
|
137 | 139 | form of a work. |
|
138 | 140 | |
|
139 | 141 | A "Standard Interface" means an interface that either is an official |
|
140 | 142 | standard defined by a recognized standards body, or, in the case of |
|
141 | 143 | interfaces specified for a particular programming language, one that |
|
142 | 144 | is widely used among developers working in that language. |
|
143 | 145 | |
|
144 | 146 | The "System Libraries" of an executable work include anything, other |
|
145 | 147 | than the work as a whole, that (a) is included in the normal form of |
|
146 | 148 | packaging a Major Component, but which is not part of that Major |
|
147 | 149 | Component, and (b) serves only to enable use of the work with that |
|
148 | 150 | Major Component, or to implement a Standard Interface for which an |
|
149 | 151 | implementation is available to the public in source code form. A |
|
150 | 152 | "Major Component", in this context, means a major essential component |
|
151 | 153 | (kernel, window system, and so on) of the specific operating system |
|
152 | 154 | (if any) on which the executable work runs, or a compiler used to |
|
153 | 155 | produce the work, or an object code interpreter used to run it. |
|
154 | 156 | |
|
155 | 157 | The "Corresponding Source" for a work in object code form means all |
|
156 | 158 | the source code needed to generate, install, and (for an executable |
|
157 | 159 | work) run the object code and to modify the work, including scripts to |
|
158 | 160 | control those activities. However, it does not include the work's |
|
159 | 161 | System Libraries, or general-purpose tools or generally available free |
|
160 | 162 | programs which are used unmodified in performing those activities but |
|
161 | 163 | which are not part of the work. For example, Corresponding Source |
|
162 | 164 | includes interface definition files associated with source files for |
|
163 | 165 | the work, and the source code for shared libraries and dynamically |
|
164 | 166 | linked subprograms that the work is specifically designed to require, |
|
165 | 167 | such as by intimate data communication or control flow between those |
|
166 | 168 | subprograms and other parts of the work. |
|
167 | 169 | |
|
168 | 170 | The Corresponding Source need not include anything that users |
|
169 | 171 | can regenerate automatically from other parts of the Corresponding |
|
170 | 172 | Source. |
|
171 | 173 | |
|
172 | 174 | The Corresponding Source for a work in source code form is that |
|
173 | 175 | same work. |
|
174 | 176 | |
|
175 | 177 | 2. Basic Permissions. |
|
176 | 178 | |
|
177 | 179 | All rights granted under this License are granted for the term of |
|
178 | 180 | copyright on the Program, and are irrevocable provided the stated |
|
179 | 181 | conditions are met. This License explicitly affirms your unlimited |
|
180 | 182 | permission to run the unmodified Program. The output from running a |
|
181 | 183 | covered work is covered by this License only if the output, given its |
|
182 | 184 | content, constitutes a covered work. This License acknowledges your |
|
183 | 185 | rights of fair use or other equivalent, as provided by copyright law. |
|
184 | 186 | |
|
185 | 187 | You may make, run and propagate covered works that you do not |
|
186 | 188 | convey, without conditions so long as your license otherwise remains |
|
187 | 189 | in force. You may convey covered works to others for the sole purpose |
|
188 | 190 | of having them make modifications exclusively for you, or provide you |
|
189 | 191 | with facilities for running those works, provided that you comply with |
|
190 | 192 | the terms of this License in conveying all material for which you do |
|
191 | 193 | not control copyright. Those thus making or running the covered works |
|
192 | 194 | for you must do so exclusively on your behalf, under your direction |
|
193 | 195 | and control, on terms that prohibit them from making any copies of |
|
194 | 196 | your copyrighted material outside their relationship with you. |
|
195 | 197 | |
|
196 | 198 | Conveying under any other circumstances is permitted solely under |
|
197 | 199 | the conditions stated below. Sublicensing is not allowed; section 10 |
|
198 | 200 | makes it unnecessary. |
|
199 | 201 | |
|
200 | 202 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. |
|
201 | 203 | |
|
202 | 204 | No covered work shall be deemed part of an effective technological |
|
203 | 205 | measure under any applicable law fulfilling obligations under article |
|
204 | 206 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or |
|
205 | 207 | similar laws prohibiting or restricting circumvention of such |
|
206 | 208 | measures. |
|
207 | 209 | |
|
208 | 210 | When you convey a covered work, you waive any legal power to forbid |
|
209 | 211 | circumvention of technological measures to the extent such circumvention |
|
210 | 212 | is effected by exercising rights under this License with respect to |
|
211 | 213 | the covered work, and you disclaim any intention to limit operation or |
|
212 | 214 | modification of the work as a means of enforcing, against the work's |
|
213 | 215 | users, your or third parties' legal rights to forbid circumvention of |
|
214 | 216 | technological measures. |
|
215 | 217 | |
|
216 | 218 | 4. Conveying Verbatim Copies. |
|
217 | 219 | |
|
218 | 220 | You may convey verbatim copies of the Program's source code as you |
|
219 | 221 | receive it, in any medium, provided that you conspicuously and |
|
220 | 222 | appropriately publish on each copy an appropriate copyright notice; |
|
221 | 223 | keep intact all notices stating that this License and any |
|
222 | 224 | non-permissive terms added in accord with section 7 apply to the code; |
|
223 | 225 | keep intact all notices of the absence of any warranty; and give all |
|
224 | 226 | recipients a copy of this License along with the Program. |
|
225 | 227 | |
|
226 | 228 | You may charge any price or no price for each copy that you convey, |
|
227 | 229 | and you may offer support or warranty protection for a fee. |
|
228 | 230 | |
|
229 | 231 | 5. Conveying Modified Source Versions. |
|
230 | 232 | |
|
231 | 233 | You may convey a work based on the Program, or the modifications to |
|
232 | 234 | produce it from the Program, in the form of source code under the |
|
233 | 235 | terms of section 4, provided that you also meet all of these conditions: |
|
234 | 236 | |
|
235 | 237 | a) The work must carry prominent notices stating that you modified |
|
236 | 238 | it, and giving a relevant date. |
|
237 | 239 | |
|
238 | 240 | b) The work must carry prominent notices stating that it is |
|
239 | 241 | released under this License and any conditions added under section |
|
240 | 242 | 7. This requirement modifies the requirement in section 4 to |
|
241 | 243 | "keep intact all notices". |
|
242 | 244 | |
|
243 | 245 | c) You must license the entire work, as a whole, under this |
|
244 | 246 | License to anyone who comes into possession of a copy. This |
|
245 | 247 | License will therefore apply, along with any applicable section 7 |
|
246 | 248 | additional terms, to the whole of the work, and all its parts, |
|
247 | 249 | regardless of how they are packaged. This License gives no |
|
248 | 250 | permission to license the work in any other way, but it does not |
|
249 | 251 | invalidate such permission if you have separately received it. |
|
250 | 252 | |
|
251 | 253 | d) If the work has interactive user interfaces, each must display |
|
252 | 254 | Appropriate Legal Notices; however, if the Program has interactive |
|
253 | 255 | interfaces that do not display Appropriate Legal Notices, your |
|
254 | 256 | work need not make them do so. |
|
255 | 257 | |
|
256 | 258 | A compilation of a covered work with other separate and independent |
|
257 | 259 | works, which are not by their nature extensions of the covered work, |
|
258 | 260 | and which are not combined with it such as to form a larger program, |
|
259 | 261 | in or on a volume of a storage or distribution medium, is called an |
|
260 | 262 | "aggregate" if the compilation and its resulting copyright are not |
|
261 | 263 | used to limit the access or legal rights of the compilation's users |
|
262 | 264 | beyond what the individual works permit. Inclusion of a covered work |
|
263 | 265 | in an aggregate does not cause this License to apply to the other |
|
264 | 266 | parts of the aggregate. |
|
265 | 267 | |
|
266 | 268 | 6. Conveying Non-Source Forms. |
|
267 | 269 | |
|
268 | 270 | You may convey a covered work in object code form under the terms |
|
269 | 271 | of sections 4 and 5, provided that you also convey the |
|
270 | 272 | machine-readable Corresponding Source under the terms of this License, |
|
271 | 273 | in one of these ways: |
|
272 | 274 | |
|
273 | 275 | a) Convey the object code in, or embodied in, a physical product |
|
274 | 276 | (including a physical distribution medium), accompanied by the |
|
275 | 277 | Corresponding Source fixed on a durable physical medium |
|
276 | 278 | customarily used for software interchange. |
|
277 | 279 | |
|
278 | 280 | b) Convey the object code in, or embodied in, a physical product |
|
279 | 281 | (including a physical distribution medium), accompanied by a |
|
280 | 282 | written offer, valid for at least three years and valid for as |
|
281 | 283 | long as you offer spare parts or customer support for that product |
|
282 | 284 | model, to give anyone who possesses the object code either (1) a |
|
283 | 285 | copy of the Corresponding Source for all the software in the |
|
284 | 286 | product that is covered by this License, on a durable physical |
|
285 | 287 | medium customarily used for software interchange, for a price no |
|
286 | 288 | more than your reasonable cost of physically performing this |
|
287 | 289 | conveying of source, or (2) access to copy the |
|
288 | 290 | Corresponding Source from a network server at no charge. |
|
289 | 291 | |
|
290 | 292 | c) Convey individual copies of the object code with a copy of the |
|
291 | 293 | written offer to provide the Corresponding Source. This |
|
292 | 294 | alternative is allowed only occasionally and noncommercially, and |
|
293 | 295 | only if you received the object code with such an offer, in accord |
|
294 | 296 | with subsection 6b. |
|
295 | 297 | |
|
296 | 298 | d) Convey the object code by offering access from a designated |
|
297 | 299 | place (gratis or for a charge), and offer equivalent access to the |
|
298 | 300 | Corresponding Source in the same way through the same place at no |
|
299 | 301 | further charge. You need not require recipients to copy the |
|
300 | 302 | Corresponding Source along with the object code. If the place to |
|
301 | 303 | copy the object code is a network server, the Corresponding Source |
|
302 | 304 | may be on a different server (operated by you or a third party) |
|
303 | 305 | that supports equivalent copying facilities, provided you maintain |
|
304 | 306 | clear directions next to the object code saying where to find the |
|
305 | 307 | Corresponding Source. Regardless of what server hosts the |
|
306 | 308 | Corresponding Source, you remain obligated to ensure that it is |
|
307 | 309 | available for as long as needed to satisfy these requirements. |
|
308 | 310 | |
|
309 | 311 | e) Convey the object code using peer-to-peer transmission, provided |
|
310 | 312 | you inform other peers where the object code and Corresponding |
|
311 | 313 | Source of the work are being offered to the general public at no |
|
312 | 314 | charge under subsection 6d. |
|
313 | 315 | |
|
314 | 316 | A separable portion of the object code, whose source code is excluded |
|
315 | 317 | from the Corresponding Source as a System Library, need not be |
|
316 | 318 | included in conveying the object code work. |
|
317 | 319 | |
|
318 | 320 | A "User Product" is either (1) a "consumer product", which means any |
|
319 | 321 | tangible personal property which is normally used for personal, family, |
|
320 | 322 | or household purposes, or (2) anything designed or sold for incorporation |
|
321 | 323 | into a dwelling. In determining whether a product is a consumer product, |
|
322 | 324 | doubtful cases shall be resolved in favor of coverage. For a particular |
|
323 | 325 | product received by a particular user, "normally used" refers to a |
|
324 | 326 | typical or common use of that class of product, regardless of the status |
|
325 | 327 | of the particular user or of the way in which the particular user |
|
326 | 328 | actually uses, or expects or is expected to use, the product. A product |
|
327 | 329 | is a consumer product regardless of whether the product has substantial |
|
328 | 330 | commercial, industrial or non-consumer uses, unless such uses represent |
|
329 | 331 | the only significant mode of use of the product. |
|
330 | 332 | |
|
331 | 333 | "Installation Information" for a User Product means any methods, |
|
332 | 334 | procedures, authorization keys, or other information required to install |
|
333 | 335 | and execute modified versions of a covered work in that User Product from |
|
334 | 336 | a modified version of its Corresponding Source. The information must |
|
335 | 337 | suffice to ensure that the continued functioning of the modified object |
|
336 | 338 | code is in no case prevented or interfered with solely because |
|
337 | 339 | modification has been made. |
|
338 | 340 | |
|
339 | 341 | If you convey an object code work under this section in, or with, or |
|
340 | 342 | specifically for use in, a User Product, and the conveying occurs as |
|
341 | 343 | part of a transaction in which the right of possession and use of the |
|
342 | 344 | User Product is transferred to the recipient in perpetuity or for a |
|
343 | 345 | fixed term (regardless of how the transaction is characterized), the |
|
344 | 346 | Corresponding Source conveyed under this section must be accompanied |
|
345 | 347 | by the Installation Information. But this requirement does not apply |
|
346 | 348 | if neither you nor any third party retains the ability to install |
|
347 | 349 | modified object code on the User Product (for example, the work has |
|
348 | 350 | been installed in ROM). |
|
349 | 351 | |
|
350 | 352 | The requirement to provide Installation Information does not include a |
|
351 | 353 | requirement to continue to provide support service, warranty, or updates |
|
352 | 354 | for a work that has been modified or installed by the recipient, or for |
|
353 | 355 | the User Product in which it has been modified or installed. Access to a |
|
354 | 356 | network may be denied when the modification itself materially and |
|
355 | 357 | adversely affects the operation of the network or violates the rules and |
|
356 | 358 | protocols for communication across the network. |
|
357 | 359 | |
|
358 | 360 | Corresponding Source conveyed, and Installation Information provided, |
|
359 | 361 | in accord with this section must be in a format that is publicly |
|
360 | 362 | documented (and with an implementation available to the public in |
|
361 | 363 | source code form), and must require no special password or key for |
|
362 | 364 | unpacking, reading or copying. |
|
363 | 365 | |
|
364 | 366 | 7. Additional Terms. |
|
365 | 367 | |
|
366 | 368 | "Additional permissions" are terms that supplement the terms of this |
|
367 | 369 | License by making exceptions from one or more of its conditions. |
|
368 | 370 | Additional permissions that are applicable to the entire Program shall |
|
369 | 371 | be treated as though they were included in this License, to the extent |
|
370 | 372 | that they are valid under applicable law. If additional permissions |
|
371 | 373 | apply only to part of the Program, that part may be used separately |
|
372 | 374 | under those permissions, but the entire Program remains governed by |
|
373 | 375 | this License without regard to the additional permissions. |
|
374 | 376 | |
|
375 | 377 | When you convey a copy of a covered work, you may at your option |
|
376 | 378 | remove any additional permissions from that copy, or from any part of |
|
377 | 379 | it. (Additional permissions may be written to require their own |
|
378 | 380 | removal in certain cases when you modify the work.) You may place |
|
379 | 381 | additional permissions on material, added by you to a covered work, |
|
380 | 382 | for which you have or can give appropriate copyright permission. |
|
381 | 383 | |
|
382 | 384 | Notwithstanding any other provision of this License, for material you |
|
383 | 385 | add to a covered work, you may (if authorized by the copyright holders of |
|
384 | 386 | that material) supplement the terms of this License with terms: |
|
385 | 387 | |
|
386 | 388 | a) Disclaiming warranty or limiting liability differently from the |
|
387 | 389 | terms of sections 15 and 16 of this License; or |
|
388 | 390 | |
|
389 | 391 | b) Requiring preservation of specified reasonable legal notices or |
|
390 | 392 | author attributions in that material or in the Appropriate Legal |
|
391 | 393 | Notices displayed by works containing it; or |
|
392 | 394 | |
|
393 | 395 | c) Prohibiting misrepresentation of the origin of that material, or |
|
394 | 396 | requiring that modified versions of such material be marked in |
|
395 | 397 | reasonable ways as different from the original version; or |
|
396 | 398 | |
|
397 | 399 | d) Limiting the use for publicity purposes of names of licensors or |
|
398 | 400 | authors of the material; or |
|
399 | 401 | |
|
400 | 402 | e) Declining to grant rights under trademark law for use of some |
|
401 | 403 | trade names, trademarks, or service marks; or |
|
402 | 404 | |
|
403 | 405 | f) Requiring indemnification of licensors and authors of that |
|
404 | 406 | material by anyone who conveys the material (or modified versions of |
|
405 | 407 | it) with contractual assumptions of liability to the recipient, for |
|
406 | 408 | any liability that these contractual assumptions directly impose on |
|
407 | 409 | those licensors and authors. |
|
408 | 410 | |
|
409 | 411 | All other non-permissive additional terms are considered "further |
|
410 | 412 | restrictions" within the meaning of section 10. If the Program as you |
|
411 | 413 | received it, or any part of it, contains a notice stating that it is |
|
412 | 414 | governed by this License along with a term that is a further |
|
413 | 415 | restriction, you may remove that term. If a license document contains |
|
414 | 416 | a further restriction but permits relicensing or conveying under this |
|
415 | 417 | License, you may add to a covered work material governed by the terms |
|
416 | 418 | of that license document, provided that the further restriction does |
|
417 | 419 | not survive such relicensing or conveying. |
|
418 | 420 | |
|
419 | 421 | If you add terms to a covered work in accord with this section, you |
|
420 | 422 | must place, in the relevant source files, a statement of the |
|
421 | 423 | additional terms that apply to those files, or a notice indicating |
|
422 | 424 | where to find the applicable terms. |
|
423 | 425 | |
|
424 | 426 | Additional terms, permissive or non-permissive, may be stated in the |
|
425 | 427 | form of a separately written license, or stated as exceptions; |
|
426 | 428 | the above requirements apply either way. |
|
427 | 429 | |
|
428 | 430 | 8. Termination. |
|
429 | 431 | |
|
430 | 432 | You may not propagate or modify a covered work except as expressly |
|
431 | 433 | provided under this License. Any attempt otherwise to propagate or |
|
432 | 434 | modify it is void, and will automatically terminate your rights under |
|
433 | 435 | this License (including any patent licenses granted under the third |
|
434 | 436 | paragraph of section 11). |
|
435 | 437 | |
|
436 | 438 | However, if you cease all violation of this License, then your |
|
437 | 439 | license from a particular copyright holder is reinstated (a) |
|
438 | 440 | provisionally, unless and until the copyright holder explicitly and |
|
439 | 441 | finally terminates your license, and (b) permanently, if the copyright |
|
440 | 442 | holder fails to notify you of the violation by some reasonable means |
|
441 | 443 | prior to 60 days after the cessation. |
|
442 | 444 | |
|
443 | 445 | Moreover, your license from a particular copyright holder is |
|
444 | 446 | reinstated permanently if the copyright holder notifies you of the |
|
445 | 447 | violation by some reasonable means, this is the first time you have |
|
446 | 448 | received notice of violation of this License (for any work) from that |
|
447 | 449 | copyright holder, and you cure the violation prior to 30 days after |
|
448 | 450 | your receipt of the notice. |
|
449 | 451 | |
|
450 | 452 | Termination of your rights under this section does not terminate the |
|
451 | 453 | licenses of parties who have received copies or rights from you under |
|
452 | 454 | this License. If your rights have been terminated and not permanently |
|
453 | 455 | reinstated, you do not qualify to receive new licenses for the same |
|
454 | 456 | material under section 10. |
|
455 | 457 | |
|
456 | 458 | 9. Acceptance Not Required for Having Copies. |
|
457 | 459 | |
|
458 | 460 | You are not required to accept this License in order to receive or |
|
459 | 461 | run a copy of the Program. Ancillary propagation of a covered work |
|
460 | 462 | occurring solely as a consequence of using peer-to-peer transmission |
|
461 | 463 | to receive a copy likewise does not require acceptance. However, |
|
462 | 464 | nothing other than this License grants you permission to propagate or |
|
463 | 465 | modify any covered work. These actions infringe copyright if you do |
|
464 | 466 | not accept this License. Therefore, by modifying or propagating a |
|
465 | 467 | covered work, you indicate your acceptance of this License to do so. |
|
466 | 468 | |
|
467 | 469 | 10. Automatic Licensing of Downstream Recipients. |
|
468 | 470 | |
|
469 | 471 | Each time you convey a covered work, the recipient automatically |
|
470 | 472 | receives a license from the original licensors, to run, modify and |
|
471 | 473 | propagate that work, subject to this License. You are not responsible |
|
472 | 474 | for enforcing compliance by third parties with this License. |
|
473 | 475 | |
|
474 | 476 | An "entity transaction" is a transaction transferring control of an |
|
475 | 477 | organization, or substantially all assets of one, or subdividing an |
|
476 | 478 | organization, or merging organizations. If propagation of a covered |
|
477 | 479 | work results from an entity transaction, each party to that |
|
478 | 480 | transaction who receives a copy of the work also receives whatever |
|
479 | 481 | licenses to the work the party's predecessor in interest had or could |
|
480 | 482 | give under the previous paragraph, plus a right to possession of the |
|
481 | 483 | Corresponding Source of the work from the predecessor in interest, if |
|
482 | 484 | the predecessor has it or can get it with reasonable efforts. |
|
483 | 485 | |
|
484 | 486 | You may not impose any further restrictions on the exercise of the |
|
485 | 487 | rights granted or affirmed under this License. For example, you may |
|
486 | 488 | not impose a license fee, royalty, or other charge for exercise of |
|
487 | 489 | rights granted under this License, and you may not initiate litigation |
|
488 | 490 | (including a cross-claim or counterclaim in a lawsuit) alleging that |
|
489 | 491 | any patent claim is infringed by making, using, selling, offering for |
|
490 | 492 | sale, or importing the Program or any portion of it. |
|
491 | 493 | |
|
492 | 494 | 11. Patents. |
|
493 | 495 | |
|
494 | 496 | A "contributor" is a copyright holder who authorizes use under this |
|
495 | 497 | License of the Program or a work on which the Program is based. The |
|
496 | 498 | work thus licensed is called the contributor's "contributor version". |
|
497 | 499 | |
|
498 | 500 | A contributor's "essential patent claims" are all patent claims |
|
499 | 501 | owned or controlled by the contributor, whether already acquired or |
|
500 | 502 | hereafter acquired, that would be infringed by some manner, permitted |
|
501 | 503 | by this License, of making, using, or selling its contributor version, |
|
502 | 504 | but do not include claims that would be infringed only as a |
|
503 | 505 | consequence of further modification of the contributor version. For |
|
504 | 506 | purposes of this definition, "control" includes the right to grant |
|
505 | 507 | patent sublicenses in a manner consistent with the requirements of |
|
506 | 508 | this License. |
|
507 | 509 | |
|
508 | 510 | Each contributor grants you a non-exclusive, worldwide, royalty-free |
|
509 | 511 | patent license under the contributor's essential patent claims, to |
|
510 | 512 | make, use, sell, offer for sale, import and otherwise run, modify and |
|
511 | 513 | propagate the contents of its contributor version. |
|
512 | 514 | |
|
513 | 515 | In the following three paragraphs, a "patent license" is any express |
|
514 | 516 | agreement or commitment, however denominated, not to enforce a patent |
|
515 | 517 | (such as an express permission to practice a patent or covenant not to |
|
516 | 518 | sue for patent infringement). To "grant" such a patent license to a |
|
517 | 519 | party means to make such an agreement or commitment not to enforce a |
|
518 | 520 | patent against the party. |
|
519 | 521 | |
|
520 | 522 | If you convey a covered work, knowingly relying on a patent license, |
|
521 | 523 | and the Corresponding Source of the work is not available for anyone |
|
522 | 524 | to copy, free of charge and under the terms of this License, through a |
|
523 | 525 | publicly available network server or other readily accessible means, |
|
524 | 526 | then you must either (1) cause the Corresponding Source to be so |
|
525 | 527 | available, or (2) arrange to deprive yourself of the benefit of the |
|
526 | 528 | patent license for this particular work, or (3) arrange, in a manner |
|
527 | 529 | consistent with the requirements of this License, to extend the patent |
|
528 | 530 | license to downstream recipients. "Knowingly relying" means you have |
|
529 | 531 | actual knowledge that, but for the patent license, your conveying the |
|
530 | 532 | covered work in a country, or your recipient's use of the covered work |
|
531 | 533 | in a country, would infringe one or more identifiable patents in that |
|
532 | 534 | country that you have reason to believe are valid. |
|
533 | 535 | |
|
534 | 536 | If, pursuant to or in connection with a single transaction or |
|
535 | 537 | arrangement, you convey, or propagate by procuring conveyance of, a |
|
536 | 538 | covered work, and grant a patent license to some of the parties |
|
537 | 539 | receiving the covered work authorizing them to use, propagate, modify |
|
538 | 540 | or convey a specific copy of the covered work, then the patent license |
|
539 | 541 | you grant is automatically extended to all recipients of the covered |
|
540 | 542 | work and works based on it. |
|
541 | 543 | |
|
542 | 544 | A patent license is "discriminatory" if it does not include within |
|
543 | 545 | the scope of its coverage, prohibits the exercise of, or is |
|
544 | 546 | conditioned on the non-exercise of one or more of the rights that are |
|
545 | 547 | specifically granted under this License. You may not convey a covered |
|
546 | 548 | work if you are a party to an arrangement with a third party that is |
|
547 | 549 | in the business of distributing software, under which you make payment |
|
548 | 550 | to the third party based on the extent of your activity of conveying |
|
549 | 551 | the work, and under which the third party grants, to any of the |
|
550 | 552 | parties who would receive the covered work from you, a discriminatory |
|
551 | 553 | patent license (a) in connection with copies of the covered work |
|
552 | 554 | conveyed by you (or copies made from those copies), or (b) primarily |
|
553 | 555 | for and in connection with specific products or compilations that |
|
554 | 556 | contain the covered work, unless you entered into that arrangement, |
|
555 | 557 | or that patent license was granted, prior to 28 March 2007. |
|
556 | 558 | |
|
557 | 559 | Nothing in this License shall be construed as excluding or limiting |
|
558 | 560 | any implied license or other defenses to infringement that may |
|
559 | 561 | otherwise be available to you under applicable patent law. |
|
560 | 562 | |
|
561 | 563 | 12. No Surrender of Others' Freedom. |
|
562 | 564 | |
|
563 | 565 | If conditions are imposed on you (whether by court order, agreement or |
|
564 | 566 | otherwise) that contradict the conditions of this License, they do not |
|
565 | 567 | excuse you from the conditions of this License. If you cannot convey a |
|
566 | 568 | covered work so as to satisfy simultaneously your obligations under this |
|
567 | 569 | License and any other pertinent obligations, then as a consequence you may |
|
568 | 570 | not convey it at all. For example, if you agree to terms that obligate you |
|
569 | 571 | to collect a royalty for further conveying from those to whom you convey |
|
570 | 572 | the Program, the only way you could satisfy both those terms and this |
|
571 | 573 | License would be to refrain entirely from conveying the Program. |
|
572 | 574 | |
|
573 | 575 | 13. Remote Network Interaction; Use with the GNU General Public License. |
|
574 | 576 | |
|
575 | 577 | Notwithstanding any other provision of this License, if you modify the |
|
576 | 578 | Program, your modified version must prominently offer all users |
|
577 | 579 | interacting with it remotely through a computer network (if your version |
|
578 | 580 | supports such interaction) an opportunity to receive the Corresponding |
|
579 | 581 | Source of your version by providing access to the Corresponding Source |
|
580 | 582 | from a network server at no charge, through some standard or customary |
|
581 | 583 | means of facilitating copying of software. This Corresponding Source |
|
582 | 584 | shall include the Corresponding Source for any work covered by version 3 |
|
583 | 585 | of the GNU General Public License that is incorporated pursuant to the |
|
584 | 586 | following paragraph. |
|
585 | 587 | |
|
586 | 588 | Notwithstanding any other provision of this License, you have |
|
587 | 589 | permission to link or combine any covered work with a work licensed |
|
588 | 590 | under version 3 of the GNU General Public License into a single |
|
589 | 591 | combined work, and to convey the resulting work. The terms of this |
|
590 | 592 | License will continue to apply to the part which is the covered work, |
|
591 | 593 | but the work with which it is combined will remain governed by version |
|
592 | 594 | 3 of the GNU General Public License. |
|
593 | 595 | |
|
594 | 596 | 14. Revised Versions of this License. |
|
595 | 597 | |
|
596 | 598 | The Free Software Foundation may publish revised and/or new versions of |
|
597 | 599 | the GNU Affero General Public License from time to time. Such new versions |
|
598 | 600 | will be similar in spirit to the present version, but may differ in detail to |
|
599 | 601 | address new problems or concerns. |
|
600 | 602 | |
|
601 | 603 | Each version is given a distinguishing version number. If the |
|
602 | 604 | Program specifies that a certain numbered version of the GNU Affero General |
|
603 | 605 | Public License "or any later version" applies to it, you have the |
|
604 | 606 | option of following the terms and conditions either of that numbered |
|
605 | 607 | version or of any later version published by the Free Software |
|
606 | 608 | Foundation. If the Program does not specify a version number of the |
|
607 | 609 | GNU Affero General Public License, you may choose any version ever published |
|
608 | 610 | by the Free Software Foundation. |
|
609 | 611 | |
|
610 | 612 | If the Program specifies that a proxy can decide which future |
|
611 | 613 | versions of the GNU Affero General Public License can be used, that proxy's |
|
612 | 614 | public statement of acceptance of a version permanently authorizes you |
|
613 | 615 | to choose that version for the Program. |
|
614 | 616 | |
|
615 | 617 | Later license versions may give you additional or different |
|
616 | 618 | permissions. However, no additional obligations are imposed on any |
|
617 | 619 | author or copyright holder as a result of your choosing to follow a |
|
618 | 620 | later version. |
|
619 | 621 | |
|
620 | 622 | 15. Disclaimer of Warranty. |
|
621 | 623 | |
|
622 | 624 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY |
|
623 | 625 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT |
|
624 | 626 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY |
|
625 | 627 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, |
|
626 | 628 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
|
627 | 629 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM |
|
628 | 630 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF |
|
629 | 631 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. |
|
630 | 632 | |
|
631 | 633 | 16. Limitation of Liability. |
|
632 | 634 | |
|
633 | 635 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING |
|
634 | 636 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS |
|
635 | 637 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY |
|
636 | 638 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE |
|
637 | 639 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF |
|
638 | 640 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD |
|
639 | 641 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), |
|
640 | 642 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF |
|
641 | 643 | SUCH DAMAGES. |
|
642 | 644 | |
|
643 | 645 | 17. Interpretation of Sections 15 and 16. |
|
644 | 646 | |
|
645 | 647 | If the disclaimer of warranty and limitation of liability provided |
|
646 | 648 | above cannot be given local legal effect according to their terms, |
|
647 | 649 | reviewing courts shall apply local law that most closely approximates |
|
648 | 650 | an absolute waiver of all civil liability in connection with the |
|
649 | 651 | Program, unless a warranty or assumption of liability accompanies a |
|
650 | 652 | copy of the Program in return for a fee. |
|
651 | 653 | |
|
652 | 654 | END OF TERMS AND CONDITIONS |
|
653 | 655 | |
|
654 | 656 | How to Apply These Terms to Your New Programs |
|
655 | 657 | |
|
656 | 658 | If you develop a new program, and you want it to be of the greatest |
|
657 | 659 | possible use to the public, the best way to achieve this is to make it |
|
658 | 660 | free software which everyone can redistribute and change under these terms. |
|
659 | 661 | |
|
660 | 662 | To do so, attach the following notices to the program. It is safest |
|
661 | 663 | to attach them to the start of each source file to most effectively |
|
662 | 664 | state the exclusion of warranty; and each file should have at least |
|
663 | 665 | the "copyright" line and a pointer to where the full notice is found. |
|
664 | 666 | |
|
665 | 667 | <one line to give the program's name and a brief idea of what it does.> |
|
666 | 668 | Copyright (C) <year> <name of author> |
|
667 | 669 | |
|
668 | 670 | This program is free software: you can redistribute it and/or modify |
|
669 | 671 | it under the terms of the GNU Affero General Public License as published by |
|
670 | 672 | the Free Software Foundation, either version 3 of the License, or |
|
671 | 673 | (at your option) any later version. |
|
672 | 674 | |
|
673 | 675 | This program is distributed in the hope that it will be useful, |
|
674 | 676 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
675 | 677 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
676 | 678 | GNU Affero General Public License for more details. |
|
677 | 679 | |
|
678 | 680 | You should have received a copy of the GNU Affero General Public License |
|
679 | 681 | along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
680 | 682 | |
|
681 | 683 | Also add information on how to contact you by electronic and paper mail. |
|
682 | 684 | |
|
683 | 685 | If your software can interact with users remotely through a computer |
|
684 | 686 | network, you should also make sure that it provides a way for users to |
|
685 | 687 | get its source. For example, if your program is a web application, its |
|
686 | 688 | interface could display a "Source" link that leads users to an archive |
|
687 | 689 | of the code. There are many ways you could offer source, and different |
|
688 | 690 | solutions will be better for different programs; see section 13 for the |
|
689 | 691 | specific requirements. |
|
690 | 692 | |
|
691 | 693 | You should also get your employer (if you work as a programmer) or school, |
|
692 | 694 | if any, to sign a "copyright disclaimer" for the program, if necessary. |
|
693 | 695 | For more information on this, and how to apply and follow the GNU AGPL, see |
|
694 | 696 | <http://www.gnu.org/licenses/>. |
@@ -1,267 +1,263 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2012-2016 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | """ |
|
22 | 22 | Compare controller for showing differences between two commits/refs/tags etc. |
|
23 | 23 | """ |
|
24 | 24 | |
|
25 | 25 | import logging |
|
26 | 26 | |
|
27 | 27 | from webob.exc import HTTPBadRequest |
|
28 | 28 | from pylons import request, tmpl_context as c, url |
|
29 | 29 | from pylons.controllers.util import redirect |
|
30 | 30 | from pylons.i18n.translation import _ |
|
31 | 31 | |
|
32 | 32 | from rhodecode.controllers.utils import parse_path_ref, get_commit_from_ref_name |
|
33 | 33 | from rhodecode.lib import helpers as h |
|
34 | from rhodecode.lib import diffs | |
|
34 | from rhodecode.lib import diffs, codeblocks | |
|
35 | 35 | from rhodecode.lib.auth import LoginRequired, HasRepoPermissionAnyDecorator |
|
36 | 36 | from rhodecode.lib.base import BaseRepoController, render |
|
37 | 37 | from rhodecode.lib.utils import safe_str |
|
38 | 38 | from rhodecode.lib.utils2 import safe_unicode, str2bool |
|
39 | 39 | from rhodecode.lib.vcs.exceptions import ( |
|
40 | EmptyRepositoryError, RepositoryError, RepositoryRequirementError) | |
|
40 | EmptyRepositoryError, RepositoryError, RepositoryRequirementError, | |
|
41 | NodeDoesNotExistError) | |
|
41 | 42 | from rhodecode.model.db import Repository, ChangesetStatus |
|
42 | 43 | |
|
43 | 44 | log = logging.getLogger(__name__) |
|
44 | 45 | |
|
45 | 46 | |
|
46 | 47 | class CompareController(BaseRepoController): |
|
47 | 48 | |
|
48 | 49 | def __before__(self): |
|
49 | 50 | super(CompareController, self).__before__() |
|
50 | 51 | |
|
51 | 52 | def _get_commit_or_redirect( |
|
52 | 53 | self, ref, ref_type, repo, redirect_after=True, partial=False): |
|
53 | 54 | """ |
|
54 | 55 | This is a safe way to get a commit. If an error occurs it |
|
55 | 56 | redirects to the summary page with a proper message. If partial is set |
|
56 | 57 | then it does not redirect but raises an exception instead. |
|
57 | 58 | """ |
|
58 | 59 | try: |
|
59 | 60 | return get_commit_from_ref_name(repo, safe_str(ref), ref_type) |
|
60 | 61 | except EmptyRepositoryError: |
|
61 | 62 | if not redirect_after: |
|
62 | 63 | return repo.scm_instance().EMPTY_COMMIT |
|
63 | 64 | h.flash(h.literal(_('There are no commits yet')), |
|
64 | 65 | category='warning') |
|
65 | 66 | redirect(url('summary_home', repo_name=repo.repo_name)) |
|
66 | 67 | |
|
67 | 68 | except RepositoryError as e: |
|
68 | 69 | msg = safe_str(e) |
|
69 | 70 | log.exception(msg) |
|
70 | 71 | h.flash(msg, category='warning') |
|
71 | 72 | if not partial: |
|
72 | 73 | redirect(h.url('summary_home', repo_name=repo.repo_name)) |
|
73 | 74 | raise HTTPBadRequest() |
|
74 | 75 | |
|
75 | 76 | @LoginRequired() |
|
76 | 77 | @HasRepoPermissionAnyDecorator('repository.read', 'repository.write', |
|
77 | 78 | 'repository.admin') |
|
78 | 79 | def index(self, repo_name): |
|
79 | 80 | c.compare_home = True |
|
80 | 81 | c.commit_ranges = [] |
|
81 | c. | |
|
82 | c.diffset = None | |
|
82 | 83 | c.limited_diff = False |
|
83 | 84 | source_repo = c.rhodecode_db_repo.repo_name |
|
84 | 85 | target_repo = request.GET.get('target_repo', source_repo) |
|
85 | 86 | c.source_repo = Repository.get_by_repo_name(source_repo) |
|
86 | 87 | c.target_repo = Repository.get_by_repo_name(target_repo) |
|
87 | 88 | c.source_ref = c.target_ref = _('Select commit') |
|
88 | 89 | c.source_ref_type = "" |
|
89 | 90 | c.target_ref_type = "" |
|
90 | 91 | c.commit_statuses = ChangesetStatus.STATUSES |
|
91 | 92 | c.preview_mode = False |
|
92 | 93 | return render('compare/compare_diff.html') |
|
93 | 94 | |
|
94 | 95 | @LoginRequired() |
|
95 | 96 | @HasRepoPermissionAnyDecorator('repository.read', 'repository.write', |
|
96 | 97 | 'repository.admin') |
|
97 | 98 | def compare(self, repo_name, source_ref_type, source_ref, |
|
98 | 99 | target_ref_type, target_ref): |
|
99 | 100 | # source_ref will be evaluated in source_repo |
|
100 | 101 | source_repo_name = c.rhodecode_db_repo.repo_name |
|
101 | 102 | source_path, source_id = parse_path_ref(source_ref) |
|
102 | 103 | |
|
103 | 104 | # target_ref will be evaluated in target_repo |
|
104 | 105 | target_repo_name = request.GET.get('target_repo', source_repo_name) |
|
105 | 106 | target_path, target_id = parse_path_ref(target_ref) |
|
106 | 107 | |
|
107 | 108 | c.commit_statuses = ChangesetStatus.STATUSES |
|
108 | 109 | |
|
109 | 110 | # if merge is True |
|
110 | 111 | # Show the changes the source would get, relative to the shared |
|
111 | 112 | # ancestor commit of source and target, if it were merged with target. |
|
112 | 113 | # Only commits which are in target but not in source will be shown. |
|
113 | 114 | merge = str2bool(request.GET.get('merge')) |
|
114 | 115 | # if merge is False |
|
115 | 116 | # Show a raw diff of source/target refs even if no ancestor exists |
|
116 | 117 | |
|
117 | 118 | |
|
118 | 119 | # c.fulldiff disables cut_off_limit |
|
119 | 120 | c.fulldiff = str2bool(request.GET.get('fulldiff')) |
|
120 | 121 | |
|
121 | 122 | # if partial, returns just compare_commits.html (commits log) |
|
122 | 123 | partial = request.is_xhr |
|
123 | 124 | |
|
124 | 125 | # swap url for compare_diff page |
|
125 | 126 | c.swap_url = h.url( |
|
126 | 127 | 'compare_url', |
|
127 | 128 | repo_name=target_repo_name, |
|
128 | 129 | source_ref_type=target_ref_type, |
|
129 | 130 | source_ref=target_ref, |
|
130 | 131 | target_repo=source_repo_name, |
|
131 | 132 | target_ref_type=source_ref_type, |
|
132 | 133 | target_ref=source_ref, |
|
133 | 134 | merge=merge and '1' or '') |
|
134 | 135 | |
|
135 | 136 | source_repo = Repository.get_by_repo_name(source_repo_name) |
|
136 | 137 | target_repo = Repository.get_by_repo_name(target_repo_name) |
|
137 | 138 | |
|
138 | 139 | if source_repo is None: |
|
139 | 140 | msg = _('Could not find the original repo: %(repo)s') % { |
|
140 | 141 | 'repo': source_repo} |
|
141 | 142 | |
|
142 | 143 | log.error(msg) |
|
143 | 144 | h.flash(msg, category='error') |
|
144 | 145 | return redirect(url('compare_home', repo_name=c.repo_name)) |
|
145 | 146 | |
|
146 | 147 | if target_repo is None: |
|
147 | 148 | msg = _('Could not find the other repo: %(repo)s') % { |
|
148 | 149 | 'repo': target_repo_name} |
|
149 | 150 | log.error(msg) |
|
150 | 151 | h.flash(msg, category='error') |
|
151 | 152 | return redirect(url('compare_home', repo_name=c.repo_name)) |
|
152 | 153 | |
|
153 | 154 | source_alias = source_repo.scm_instance().alias |
|
154 | 155 | target_alias = target_repo.scm_instance().alias |
|
155 | 156 | if source_alias != target_alias: |
|
156 | 157 | msg = _('The comparison of two different kinds of remote repos ' |
|
157 | 158 | 'is not available') |
|
158 | 159 | log.error(msg) |
|
159 | 160 | h.flash(msg, category='error') |
|
160 | 161 | return redirect(url('compare_home', repo_name=c.repo_name)) |
|
161 | 162 | |
|
162 | 163 | source_commit = self._get_commit_or_redirect( |
|
163 | 164 | ref=source_id, ref_type=source_ref_type, repo=source_repo, |
|
164 | 165 | partial=partial) |
|
165 | 166 | target_commit = self._get_commit_or_redirect( |
|
166 | 167 | ref=target_id, ref_type=target_ref_type, repo=target_repo, |
|
167 | 168 | partial=partial) |
|
168 | 169 | |
|
169 | 170 | c.compare_home = False |
|
170 | 171 | c.source_repo = source_repo |
|
171 | 172 | c.target_repo = target_repo |
|
172 | 173 | c.source_ref = source_ref |
|
173 | 174 | c.target_ref = target_ref |
|
174 | 175 | c.source_ref_type = source_ref_type |
|
175 | 176 | c.target_ref_type = target_ref_type |
|
176 | 177 | |
|
177 | 178 | source_scm = source_repo.scm_instance() |
|
178 | 179 | target_scm = target_repo.scm_instance() |
|
179 | 180 | |
|
180 | 181 | pre_load = ["author", "branch", "date", "message"] |
|
181 | 182 | c.ancestor = None |
|
182 | 183 | try: |
|
183 | 184 | c.commit_ranges = source_scm.compare( |
|
184 | 185 | source_commit.raw_id, target_commit.raw_id, |
|
185 | 186 | target_scm, merge, pre_load=pre_load) |
|
186 | 187 | if merge: |
|
187 | 188 | c.ancestor = source_scm.get_common_ancestor( |
|
188 | 189 | source_commit.raw_id, target_commit.raw_id, target_scm) |
|
189 | 190 | except RepositoryRequirementError: |
|
190 | 191 | msg = _('Could not compare repos with different ' |
|
191 | 192 | 'large file settings') |
|
192 | 193 | log.error(msg) |
|
193 | 194 | if partial: |
|
194 | 195 | return msg |
|
195 | 196 | h.flash(msg, category='error') |
|
196 | 197 | return redirect(url('compare_home', repo_name=c.repo_name)) |
|
197 | 198 | |
|
198 | 199 | c.statuses = c.rhodecode_db_repo.statuses( |
|
199 | 200 | [x.raw_id for x in c.commit_ranges]) |
|
200 | 201 | |
|
201 | 202 | if partial: # for PR ajax commits loader |
|
202 | 203 | if not c.ancestor: |
|
203 | 204 | return '' # cannot merge if there is no ancestor |
|
204 | 205 | return render('compare/compare_commits.html') |
|
205 | 206 | |
|
206 | 207 | if c.ancestor: |
|
207 | 208 | # case we want a simple diff without incoming commits, |
|
208 | 209 | # previewing what will be merged. |
|
209 | 210 | # Make the diff on target repo (which is known to have target_ref) |
|
210 | 211 | log.debug('Using ancestor %s as source_ref instead of %s' |
|
211 | 212 | % (c.ancestor, source_ref)) |
|
212 | 213 | source_repo = target_repo |
|
213 | 214 | source_commit = target_repo.get_commit(commit_id=c.ancestor) |
|
214 | 215 | |
|
215 | 216 | # diff_limit will cut off the whole diff if the limit is applied |
|
216 | 217 | # otherwise it will just hide the big files from the front-end |
|
217 | 218 | diff_limit = self.cut_off_limit_diff |
|
218 | 219 | file_limit = self.cut_off_limit_file |
|
219 | 220 | |
|
220 | 221 | log.debug('calculating diff between ' |
|
221 | 222 | 'source_ref:%s and target_ref:%s for repo `%s`', |
|
222 | 223 | source_commit, target_commit, |
|
223 | 224 | safe_unicode(source_repo.scm_instance().path)) |
|
224 | 225 | |
|
225 | 226 | if source_commit.repository != target_commit.repository: |
|
226 | 227 | msg = _( |
|
227 | 228 | "Repositories unrelated. " |
|
228 | 229 | "Cannot compare commit %(commit1)s from repository %(repo1)s " |
|
229 | 230 | "with commit %(commit2)s from repository %(repo2)s.") % { |
|
230 | 231 | 'commit1': h.show_id(source_commit), |
|
231 | 232 | 'repo1': source_repo.repo_name, |
|
232 | 233 | 'commit2': h.show_id(target_commit), |
|
233 | 234 | 'repo2': target_repo.repo_name, |
|
234 | 235 | } |
|
235 | 236 | h.flash(msg, category='error') |
|
236 | 237 | raise HTTPBadRequest() |
|
237 | 238 | |
|
238 | 239 | txtdiff = source_repo.scm_instance().get_diff( |
|
239 | 240 | commit1=source_commit, commit2=target_commit, |
|
240 | 241 | path1=source_path, path=target_path) |
|
241 | 242 | diff_processor = diffs.DiffProcessor( |
|
242 | txtdiff, format=' | |
|
243 | txtdiff, format='newdiff', diff_limit=diff_limit, | |
|
243 | 244 | file_limit=file_limit, show_full_diff=c.fulldiff) |
|
244 | 245 | _parsed = diff_processor.prepare() |
|
245 | 246 | |
|
246 | c.limited_diff = False | |
|
247 | if isinstance(_parsed, diffs.LimitedDiffContainer): | |
|
248 | c.limited_diff = True | |
|
247 | def _node_getter(commit): | |
|
248 | """ Returns a function that returns a node for a commit or None """ | |
|
249 | def get_node(fname): | |
|
250 | try: | |
|
251 | return commit.get_node(fname) | |
|
252 | except NodeDoesNotExistError: | |
|
253 | return None | |
|
254 | return get_node | |
|
249 | 255 | |
|
250 | c.files = [] | |
|
251 | c.changes = {} | |
|
252 | c.lines_added = 0 | |
|
253 | c.lines_deleted = 0 | |
|
254 | for f in _parsed: | |
|
255 | st = f['stats'] | |
|
256 | if not st['binary']: | |
|
257 | c.lines_added += st['added'] | |
|
258 | c.lines_deleted += st['deleted'] | |
|
259 | fid = h.FID('', f['filename']) | |
|
260 | c.files.append([fid, f['operation'], f['filename'], f['stats'], f]) | |
|
261 | htmldiff = diff_processor.as_html( | |
|
262 | enable_comments=False, parsed_lines=[f]) | |
|
263 | c.changes[fid] = [f['operation'], f['filename'], htmldiff, f] | |
|
256 | c.diffset = codeblocks.DiffSet( | |
|
257 | source_node_getter=_node_getter(source_commit), | |
|
258 | target_node_getter=_node_getter(target_commit), | |
|
259 | ).render_patchset(_parsed, source_ref, target_ref) | |
|
264 | 260 | |
|
265 | 261 | c.preview_mode = merge |
|
266 | 262 | |
|
267 | 263 | return render('compare/compare_diff.html') |
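
A minimal sketch (not part of the changeset) of the flow the controller now delegates to: DiffProcessor parses the raw diff, codeblocks.DiffSet renders it, and the limited-diff bookkeeping that compare() used to do inline is now carried on the returned diffset.

    # sketch; names as bound in compare() above
    _parsed = diff_processor.prepare()
    c.diffset = codeblocks.DiffSet(
        source_node_getter=_node_getter(source_commit),
        target_node_getter=_node_getter(target_commit),
    ).render_patchset(_parsed, source_ref, target_ref)
    # replaces the removed c.limited_diff logic:
    assert c.diffset.limited_diff == isinstance(
        _parsed, diffs.LimitedDiffContainer)
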
@@ -1,214 +1,635 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2011-2016 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | import logging |
|
22 | import difflib | |
|
22 | 23 | from itertools import groupby |
|
23 | 24 | |
|
24 | 25 | from pygments import lex |
|
25 | 26 | from pygments.formatters.html import _get_ttype_class as pygment_token_class |
|
26 | from rhodecode.lib.helpers import get_lexer_for_filenode, html_escape | |
|
27 | from rhodecode.lib.helpers import ( | |
|
28 | get_lexer_for_filenode, get_lexer_safe, html_escape) | |
|
27 | 29 | from rhodecode.lib.utils2 import AttributeDict |
|
28 | 30 | from rhodecode.lib.vcs.nodes import FileNode |
|
31 | from rhodecode.lib.diff_match_patch import diff_match_patch | |
|
32 | from rhodecode.lib.diffs import LimitedDiffContainer | |
|
29 | 33 | from pygments.lexers import get_lexer_by_name |
|
30 | 34 | |
|
31 | 35 | plain_text_lexer = get_lexer_by_name( |
|
32 | 36 | 'text', stripall=False, stripnl=False, ensurenl=False) |
|
33 | 37 | |
|
34 | 38 | |
|
35 | 39 | log = logging.getLogger() |
|
36 | 40 | |
|
37 | 41 | |
|
38 | 42 | def filenode_as_lines_tokens(filenode, lexer=None): |
|
39 | 43 | lexer = lexer or get_lexer_for_filenode(filenode) |
|
40 | 44 | log.debug('Generating file node pygment tokens for %s, %s', lexer, filenode) |
|
41 | tokens = tokenize_string(filenode.content, | |
|
45 | tokens = tokenize_string(filenode.content, lexer) | |
|
42 | 46 | lines = split_token_stream(tokens, split_string='\n') |
|
43 | 47 | rv = list(lines) |
|
44 | 48 | return rv |
|
45 | 49 | |
|
46 | 50 | |
|
47 | 51 | def tokenize_string(content, lexer): |
|
48 | 52 | """ |
|
49 | 53 | Use pygments to tokenize some content based on a lexer |
|
50 | 54 | ensuring all original newlines and whitespace are preserved |
|
51 | 55 | """ |
|
52 | 56 | |
|
53 | 57 | lexer.stripall = False |
|
54 | 58 | lexer.stripnl = False |
|
55 | 59 | lexer.ensurenl = False |
|
56 | 60 | for token_type, token_text in lex(content, lexer): |
|
57 | 61 | yield pygment_token_class(token_type), token_text |
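
A usage sketch (illustrative only; the token classes come from pygments'
_get_ttype_class, so the exact stream varies with the pygments version):

    from pygments.lexers import get_lexer_by_name

    list(tokenize_string(u'x = 1\n', get_lexer_by_name('python')))
    # e.g. [('n', u'x'), ('', u' '), ('o', u'='), ('', u' '),
    #       ('mi', u'1'), ('', u'\n')]
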
|
58 | 62 | |
|
59 | 63 | |
|
60 | 64 | def split_token_stream(tokens, split_string=u'\n'): |
|
61 | 65 | """ |
|
62 | 66 | Take a list of (TokenType, text) tuples and split them by a string |
|
63 | 67 | |
|
64 | 68 | >>> split_token_stream([(TEXT, 'some\ntext'), (TEXT, 'more\n')]) |
|
65 | 69 | [[(TEXT, 'some')], [(TEXT, 'text'), (TEXT, 'more')], |
|
66 | 70 | [(TEXT, '')]] |
|
67 | 71 | """ |
|
68 | 72 | |
|
69 | 73 | buffer = [] |
|
70 | 74 | for token_class, token_text in tokens: |
|
71 | 75 | parts = token_text.split(split_string) |
|
72 | 76 | for part in parts[:-1]: |
|
73 | 77 | buffer.append((token_class, part)) |
|
74 | 78 | yield buffer |
|
75 | 79 | buffer = [] |
|
76 | 80 | |
|
77 | 81 | buffer.append((token_class, parts[-1])) |
|
78 | 82 | |
|
79 | 83 | if buffer: |
|
80 | 84 | yield buffer |
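
Concretely, each yielded buffer holds one line's worth of (class, text)
tuples, with a trailing empty line when the input ends in the split string:

    list(split_token_stream([('', u'some\ntext'), ('', u'more\n')]))
    # [[('', u'some')], [('', u'text'), ('', u'more')], [('', u'')]]
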
|
81 | 85 | |
|
82 | 86 | |
|
83 | 87 | def filenode_as_annotated_lines_tokens(filenode): |
|
84 | 88 | """ |
|
85 | 89 | Take a file node and return a list of annotations => lines; if no annotation |
|
86 | 90 | is found for a line, its annotation will be None. |
|
87 | 91 | |
|
88 | 92 | eg: |
|
89 | 93 | |
|
90 | 94 | [ |
|
91 | 95 | (annotation1, [ |
|
92 | 96 | (1, line1_tokens_list), |
|
93 | 97 | (2, line2_tokens_list), |
|
94 | 98 | ]), |
|
95 | 99 | (annotation2, [ |
|
96 | 100 | (3, line1_tokens_list), |
|
97 | 101 | ]), |
|
98 | 102 | (None, [ |
|
99 | 103 | (4, line1_tokens_list), |
|
100 | 104 | ]), |
|
101 | 105 | (annotation1, [ |
|
102 | 106 | (5, line1_tokens_list), |
|
103 | 107 | (6, line2_tokens_list), |
|
104 | 108 | ]) |
|
105 | 109 | ] |
|
106 | 110 | """ |
|
107 | 111 | |
|
108 | 112 | commit_cache = {} # cache commit_getter lookups |
|
109 | 113 | |
|
110 | 114 | def _get_annotation(commit_id, commit_getter): |
|
111 | 115 | if commit_id not in commit_cache: |
|
112 | 116 | commit_cache[commit_id] = commit_getter() |
|
113 | 117 | return commit_cache[commit_id] |
|
114 | 118 | |
|
115 | 119 | annotation_lookup = { |
|
116 | 120 | line_no: _get_annotation(commit_id, commit_getter) |
|
117 | 121 | for line_no, commit_id, commit_getter, line_content |
|
118 | 122 | in filenode.annotate |
|
119 | 123 | } |
|
120 | 124 | |
|
121 | 125 | annotations_lines = ((annotation_lookup.get(line_no), line_no, tokens) |
|
122 | 126 | for line_no, tokens |
|
123 | 127 | in enumerate(filenode_as_lines_tokens(filenode), 1)) |
|
124 | 128 | |
|
125 | 129 | grouped_annotations_lines = groupby(annotations_lines, lambda x: x[0]) |
|
126 | 130 | |
|
127 | 131 | for annotation, group in grouped_annotations_lines: |
|
128 | 132 | yield ( |
|
129 | 133 | annotation, [(line_no, tokens) |
|
130 | 134 | for (_, line_no, tokens) in group] |
|
131 | 135 | ) |
|
132 | 136 | |
|
133 | 137 | |
|
134 | 138 | def render_tokenstream(tokenstream): |
|
135 | 139 | result = [] |
|
136 | 140 | for token_class, token_ops_texts in rollup_tokenstream(tokenstream): |
|
137 | 141 | |
|
138 | 142 | if token_class: |
|
139 | 143 | result.append(u'<span class="%s">' % token_class) |
|
140 | 144 | else: |
|
141 | 145 | result.append(u'<span>') |
|
142 | 146 | |
|
143 | 147 | for op_tag, token_text in token_ops_texts: |
|
144 | 148 | |
|
145 | 149 | if op_tag: |
|
146 | 150 | result.append(u'<%s>' % op_tag) |
|
147 | 151 | |
|
148 | 152 | escaped_text = html_escape(token_text) |
|
149 | escaped_text = escaped_text.replace('\n', '<nl>\n</nl>') | |
|
153 | ||
|
154 | # TODO: dan: investigate showing hidden characters like space/nl/tab | |
|
155 | # escaped_text = escaped_text.replace(' ', '<sp> </sp>') | |
|
156 | # escaped_text = escaped_text.replace('\n', '<nl>\n</nl>') | |
|
157 | # escaped_text = escaped_text.replace('\t', '<tab>\t</tab>') | |
|
150 | 158 | |
|
151 | 159 | result.append(escaped_text) |
|
152 | 160 | |
|
153 | 161 | if op_tag: |
|
154 | 162 | result.append(u'</%s>' % op_tag) |
|
155 | 163 | |
|
156 | 164 | result.append(u'</span>') |
|
157 | 165 | |
|
158 | 166 | html = ''.join(result) |
|
159 | 167 | return html |
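
For example, a small 3-tuple stream renders to the minimal span/op markup
described in rollup_tokenstream below:

    render_tokenstream([('k', '', u'def'), ('', '', u' '), ('nf', 'ins', u'foo')])
    # u'<span class="k">def</span><span> </span><span class="nf"><ins>foo</ins></span>'
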
|
160 | 168 | |
|
161 | 169 | |
|
162 | 170 | def rollup_tokenstream(tokenstream): |
|
163 | 171 | """ |
|
164 | 172 | Group a token stream of the format: |
|
165 | 173 | |
|
166 | 174 | ('class', 'op', 'text') |
|
167 | 175 | or |
|
168 | 176 | ('class', 'text') |
|
169 | 177 | |
|
170 | 178 | into |
|
171 | 179 | |
|
172 | 180 | [('class1', |
|
173 | 181 | [('op1', 'text'), |
|
174 | 182 | ('op2', 'text')]), |
|
175 | 183 | ('class2', |
|
176 | 184 | [('op3', 'text')])] |
|
177 | 185 | |
|
178 | 186 | This is used to get the minimal tags necessary when |
|
179 | 187 | rendering to html, e.g. for a token stream: |
|
180 | 188 | |
|
181 | 189 | <span class="A"><ins>he</ins>llo</span> |
|
182 | 190 | vs |
|
183 | 191 | <span class="A"><ins>he</ins></span><span class="A">llo</span> |
|
184 | 192 | |
|
185 | 193 | If a 2-tuple is passed in, the output op will be an empty string. |
|
186 | 194 | |
|
187 | 195 | eg: |
|
188 | 196 | |
|
189 | 197 | >>> rollup_tokenstream([('classA', '', 'h'), |
|
190 | 198 | ('classA', 'del', 'ell'), |
|
191 | 199 | ('classA', '', 'o'), |
|
192 | 200 | ('classB', '', ' '), |
|
193 | 201 | ('classA', '', 'the'), |
|
194 | 202 | ('classA', '', 're'), |
|
195 | 203 | ]) |
|
196 | 204 | |
|
197 | 205 | [('classA', [('', 'h'), ('del', 'ell'), ('', 'o')]), |
|
198 | 206 | ('classB', [('', ' ')]), |
|
199 | 207 | ('classA', [('', 'there')])] |
|
200 | 208 | |
|
201 | 209 | """ |
|
202 | 210 | if tokenstream and len(tokenstream[0]) == 2: |
|
203 | 211 | tokenstream = ((t[0], '', t[1]) for t in tokenstream) |
|
204 | 212 | |
|
205 | 213 | result = [] |
|
206 | 214 | for token_class, op_list in groupby(tokenstream, lambda t: t[0]): |
|
207 | 215 | ops = [] |
|
208 | 216 | for token_op, token_text_list in groupby(op_list, lambda o: o[1]): |
|
209 | 217 | text_buffer = [] |
|
210 | 218 | for t_class, t_op, t_text in token_text_list: |
|
211 | 219 | text_buffer.append(t_text) |
|
212 | 220 | ops.append((token_op, ''.join(text_buffer))) |
|
213 | 221 | result.append((token_class, ops)) |
|
214 | 222 | return result |
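
A usage sketch showing how adjacent text with the same class and op is
merged into a single run:

    rollup_tokenstream([('A', u'he'), ('A', u'llo')])
    # [('A', [('', u'hello')])]
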
|
223 | ||
|
224 | ||
|
225 | def tokens_diff(old_tokens, new_tokens, use_diff_match_patch=True): | |
|
226 | """ | |
|
227 | Converts a list of (token_class, token_text) tuples to a list of | |
|
228 | (token_class, token_op, token_text) tuples where token_op is one of | |
|
229 | ('ins', 'del', '') | |
|
230 | ||
|
231 | :param old_tokens: list of (token_class, token_text) tuples of old line | |
|
232 | :param new_tokens: list of (token_class, token_text) tuples of new line | |
|
233 | :param use_diff_match_patch: boolean, will use google's diff match patch | |
|
234 | library, which has options to 'smooth' out the character-by-character | |
|
235 | differences making nicer ins/del blocks | |
|
236 | """ | |
|
237 | ||
|
238 | old_tokens_result = [] | |
|
239 | new_tokens_result = [] | |
|
240 | ||
|
241 | similarity = difflib.SequenceMatcher(None, | |
|
242 | ''.join(token_text for token_class, token_text in old_tokens), | |
|
243 | ''.join(token_text for token_class, token_text in new_tokens) | |
|
244 | ).ratio() | |
|
245 | ||
|
246 | if similarity < 0.6: # return, the blocks are too different | |
|
247 | for token_class, token_text in old_tokens: | |
|
248 | old_tokens_result.append((token_class, '', token_text)) | |
|
249 | for token_class, token_text in new_tokens: | |
|
250 | new_tokens_result.append((token_class, '', token_text)) | |
|
251 | return old_tokens_result, new_tokens_result, similarity | |
|
252 | ||
|
253 | token_sequence_matcher = difflib.SequenceMatcher(None, | |
|
254 | [x[1] for x in old_tokens], | |
|
255 | [x[1] for x in new_tokens]) | |
|
256 | ||
|
257 | for tag, o1, o2, n1, n2 in token_sequence_matcher.get_opcodes(): | |
|
258 | # check the differences by token block types first to give a | |
|
259 | # nicer "block" level replacement vs character diffs | |
|
260 | ||
|
261 | if tag == 'equal': | |
|
262 | for token_class, token_text in old_tokens[o1:o2]: | |
|
263 | old_tokens_result.append((token_class, '', token_text)) | |
|
264 | for token_class, token_text in new_tokens[n1:n2]: | |
|
265 | new_tokens_result.append((token_class, '', token_text)) | |
|
266 | elif tag == 'delete': | |
|
267 | for token_class, token_text in old_tokens[o1:o2]: | |
|
268 | old_tokens_result.append((token_class, 'del', token_text)) | |
|
269 | elif tag == 'insert': | |
|
270 | for token_class, token_text in new_tokens[n1:n2]: | |
|
271 | new_tokens_result.append((token_class, 'ins', token_text)) | |
|
272 | elif tag == 'replace': | |
|
273 | # if same type token blocks must be replaced, do a diff on the | |
|
274 | # characters in the token blocks to show individual changes | |
|
275 | ||
|
276 | old_char_tokens = [] | |
|
277 | new_char_tokens = [] | |
|
278 | for token_class, token_text in old_tokens[o1:o2]: | |
|
279 | for char in token_text: | |
|
280 | old_char_tokens.append((token_class, char)) | |
|
281 | ||
|
282 | for token_class, token_text in new_tokens[n1:n2]: | |
|
283 | for char in token_text: | |
|
284 | new_char_tokens.append((token_class, char)) | |
|
285 | ||
|
286 | old_string = ''.join([token_text for | |
|
287 | token_class, token_text in old_char_tokens]) | |
|
288 | new_string = ''.join([token_text for | |
|
289 | token_class, token_text in new_char_tokens]) | |
|
290 | ||
|
291 | char_sequence = difflib.SequenceMatcher( | |
|
292 | None, old_string, new_string) | |
|
293 | copcodes = char_sequence.get_opcodes() | |
|
294 | obuffer, nbuffer = [], [] | |
|
295 | ||
|
296 | if use_diff_match_patch: | |
|
297 | dmp = diff_match_patch() | |
|
298 | dmp.Diff_EditCost = 11 # TODO: dan: extract this to a setting | |
|
299 | reps = dmp.diff_main(old_string, new_string) | |
|
300 | dmp.diff_cleanupEfficiency(reps) | |
|
301 | ||
|
302 | a, b = 0, 0 | |
|
303 | for op, rep in reps: | |
|
304 | l = len(rep) | |
|
305 | if op == 0: | |
|
306 | for i, c in enumerate(rep): | |
|
307 | obuffer.append((old_char_tokens[a+i][0], '', c)) | |
|
308 | nbuffer.append((new_char_tokens[b+i][0], '', c)) | |
|
309 | a += l | |
|
310 | b += l | |
|
311 | elif op == -1: | |
|
312 | for i, c in enumerate(rep): | |
|
313 | obuffer.append((old_char_tokens[a+i][0], 'del', c)) | |
|
314 | a += l | |
|
315 | elif op == 1: | |
|
316 | for i, c in enumerate(rep): | |
|
317 | nbuffer.append((new_char_tokens[b+i][0], 'ins', c)) | |
|
318 | b += l | |
|
319 | else: | |
|
320 | for ctag, co1, co2, cn1, cn2 in copcodes: | |
|
321 | if ctag == 'equal': | |
|
322 | for token_class, token_text in old_char_tokens[co1:co2]: | |
|
323 | obuffer.append((token_class, '', token_text)) | |
|
324 | for token_class, token_text in new_char_tokens[cn1:cn2]: | |
|
325 | nbuffer.append((token_class, '', token_text)) | |
|
326 | elif ctag == 'delete': | |
|
327 | for token_class, token_text in old_char_tokens[co1:co2]: | |
|
328 | obuffer.append((token_class, 'del', token_text)) | |
|
329 | elif ctag == 'insert': | |
|
330 | for token_class, token_text in new_char_tokens[cn1:cn2]: | |
|
331 | nbuffer.append((token_class, 'ins', token_text)) | |
|
332 | elif ctag == 'replace': | |
|
333 | for token_class, token_text in old_char_tokens[co1:co2]: | |
|
334 | obuffer.append((token_class, 'del', token_text)) | |
|
335 | for token_class, token_text in new_char_tokens[cn1:cn2]: | |
|
336 | nbuffer.append((token_class, 'ins', token_text)) | |
|
337 | ||
|
338 | old_tokens_result.extend(obuffer) | |
|
339 | new_tokens_result.extend(nbuffer) | |
|
340 | ||
|
341 | return old_tokens_result, new_tokens_result, similarity | |
|
342 | ||
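For reference, a minimal sketch of how the tokens_diff result above might be consumed; the two-argument call shape matches its use in DiffSet.parse_lines below, but the token classes here are invented for illustration:

    old = [('kw', 'def'), ('ws', ' '), ('name', 'foo')]
    new = [('kw', 'def'), ('ws', ' '), ('name', 'bar')]
    old_res, new_res, similarity = tokens_diff(old, new)
    for token_class, change_op, text in old_res + new_res:
        # change_op is '' (unchanged), 'ins' or 'del'
        print((token_class, change_op, text))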
|
343 | ||
|
344 | class DiffSet(object): | |
|
345 | """ | |
|
346 | An object for parsing the diff result from diffs.DiffProcessor and | |
|
347 | adding highlighting, side-by-side/unified renderings and line diffs
|
348 | """ | |
|
349 | ||
|
350 | HL_REAL = 'REAL' # highlights using original file, slow | |
|
351 | HL_FAST = 'FAST' # highlights using just the line, fast but not correct | |
|
352 | # in the case of multiline code | |
|
353 | HL_NONE = 'NONE' # no highlighting, fastest | |
|
354 | ||
|
355 | def __init__(self, highlight_mode=HL_REAL, | |
|
356 | source_node_getter=lambda filename: None, | |
|
357 | target_node_getter=lambda filename: None, | |
|
358 | source_nodes=None, target_nodes=None, | |
|
359 | max_file_size_limit=150 * 1024, # files over this size will | |
|
360 | # use fast highlighting | |
|
361 | ): | |
|
362 | ||
|
363 | self.highlight_mode = highlight_mode | |
|
364 | self.highlighted_filenodes = {} | |
|
365 | self.source_node_getter = source_node_getter | |
|
366 | self.target_node_getter = target_node_getter | |
|
367 | self.source_nodes = source_nodes or {} | |
|
368 | self.target_nodes = target_nodes or {} | |
|
369 | ||
|
370 | ||
|
371 | self.max_file_size_limit = max_file_size_limit | |
|
372 | ||
|
373 | def render_patchset(self, patchset, source_ref=None, target_ref=None): | |
|
374 | diffset = AttributeDict(dict( | |
|
375 | lines_added=0, | |
|
376 | lines_deleted=0, | |
|
377 | changed_files=0, | |
|
378 | files=[], | |
|
379 | limited_diff=isinstance(patchset, LimitedDiffContainer), | |
|
380 | source_ref=source_ref, | |
|
381 | target_ref=target_ref, | |
|
382 | )) | |
|
383 | for patch in patchset: | |
|
384 | filediff = self.render_patch(patch) | |
|
385 | filediff.diffset = diffset | |
|
386 | diffset.files.append(filediff) | |
|
387 | diffset.changed_files += 1 | |
|
388 | if not patch['stats']['binary']: | |
|
389 | diffset.lines_added += patch['stats']['added'] | |
|
390 | diffset.lines_deleted += patch['stats']['deleted'] | |
|
391 | ||
|
392 | return diffset | |
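A hedged sketch of driving a DiffSet end to end; the patchset would normally come from DiffProcessor.prepare() (below), and the node getters here are stand-ins:

    diffset = DiffSet(
        highlight_mode=DiffSet.HL_FAST,
        source_node_getter=lambda fname: None,  # stand-in; would return a FileNode
        target_node_getter=lambda fname: None,
    )
    rendered = diffset.render_patchset(
        patchset, source_ref='deadbeef', target_ref='cafebabe')
    print((rendered.changed_files, rendered.lines_added, rendered.lines_deleted))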
|
393 | ||
|
394 | _lexer_cache = {} | |
|
395 | def _get_lexer_for_filename(self, filename): | |
|
396 | # cached because we might need to call it twice for source/target | |
|
397 | if filename not in self._lexer_cache: | |
|
398 | self._lexer_cache[filename] = get_lexer_safe(filepath=filename) | |
|
399 | return self._lexer_cache[filename] | |
|
400 | ||
|
401 | def render_patch(self, patch): | |
|
402 | log.debug('rendering diff for %r', patch['filename'])
|
403 | ||
|
404 | source_filename = patch['original_filename'] | |
|
405 | target_filename = patch['filename'] | |
|
406 | ||
|
407 | source_lexer = plain_text_lexer | |
|
408 | target_lexer = plain_text_lexer | |
|
409 | ||
|
410 | if not patch['stats']['binary']: | |
|
411 | if self.highlight_mode == self.HL_REAL: | |
|
412 | if (source_filename and patch['operation'] in ('D', 'M') | |
|
413 | and source_filename not in self.source_nodes): | |
|
414 | self.source_nodes[source_filename] = ( | |
|
415 | self.source_node_getter(source_filename)) | |
|
416 | ||
|
417 | if (target_filename and patch['operation'] in ('A', 'M') | |
|
418 | and target_filename not in self.target_nodes): | |
|
419 | self.target_nodes[target_filename] = ( | |
|
420 | self.target_node_getter(target_filename)) | |
|
421 | ||
|
422 | elif self.highlight_mode == self.HL_FAST: | |
|
423 | source_lexer = self._get_lexer_for_filename(source_filename) | |
|
424 | target_lexer = self._get_lexer_for_filename(target_filename) | |
|
425 | ||
|
426 | source_file = self.source_nodes.get(source_filename, source_filename) | |
|
427 | target_file = self.target_nodes.get(target_filename, target_filename) | |
|
428 | ||
|
429 | source_filenode, target_filenode = None, None | |
|
430 | ||
|
431 | # TODO: dan: FileNode.lexer works on the content of the file, which

432 | # can be slow. Issue #4289 explains a lexer clean-up which, once

433 | # done, can allow caching a lexer for a filenode to avoid the file lookup
|
434 | if isinstance(source_file, FileNode): | |
|
435 | source_filenode = source_file | |
|
436 | source_lexer = source_file.lexer | |
|
437 | if isinstance(target_file, FileNode): | |
|
438 | target_filenode = target_file | |
|
439 | target_lexer = target_file.lexer | |
|
440 | ||
|
441 | source_file_path, target_file_path = None, None | |
|
442 | ||
|
443 | if source_filename != '/dev/null': | |
|
444 | source_file_path = source_filename | |
|
445 | if target_filename != '/dev/null': | |
|
446 | target_file_path = target_filename | |
|
447 | ||
|
448 | source_file_type = source_lexer.name | |
|
449 | target_file_type = target_lexer.name | |
|
450 | ||
|
451 | op_hunks = patch['chunks'][0] | |
|
452 | hunks = patch['chunks'][1:] | |
|
453 | ||
|
454 | filediff = AttributeDict({ | |
|
455 | 'source_file_path': source_file_path, | |
|
456 | 'target_file_path': target_file_path, | |
|
457 | 'source_filenode': source_filenode, | |
|
458 | 'target_filenode': target_filenode, | |
|
459 | 'hunks': [], | |
|
460 | 'source_file_type': source_file_type,

461 | 'target_file_type': target_file_type,
|
462 | 'patch': patch, | |
|
463 | 'source_mode': patch['stats']['old_mode'], | |
|
464 | 'target_mode': patch['stats']['new_mode'], | |
|
465 | 'limited_diff': isinstance(patch, LimitedDiffContainer), | |
|
466 | 'diffset': self, | |
|
467 | }) | |
|
468 | ||
|
469 | for hunk in hunks: | |
|
470 | hunkbit = self.parse_hunk(hunk, source_file, target_file) | |
|
471 | hunkbit.filediff = filediff | |
|
472 | filediff.hunks.append(hunkbit) | |
|
473 | return filediff | |
|
474 | ||
|
475 | def parse_hunk(self, hunk, source_file, target_file): | |
|
476 | result = AttributeDict(dict( | |
|
477 | source_start=hunk['source_start'], | |
|
478 | source_length=hunk['source_length'], | |
|
479 | target_start=hunk['target_start'], | |
|
480 | target_length=hunk['target_length'], | |
|
481 | section_header=hunk['section_header'], | |
|
482 | lines=[], | |
|
483 | )) | |
|
484 | before, after = [], [] | |
|
485 | ||
|
486 | for line in hunk['lines']: | |
|
487 | if line['action'] == 'unmod': | |
|
488 | result.lines.extend( | |
|
489 | self.parse_lines(before, after, source_file, target_file)) | |
|
490 | after.append(line) | |
|
491 | before.append(line) | |
|
492 | elif line['action'] == 'add': | |
|
493 | after.append(line) | |
|
494 | elif line['action'] == 'del': | |
|
495 | before.append(line) | |
|
496 | elif line['action'] == 'context-old': | |
|
497 | before.append(line) | |
|
498 | elif line['action'] == 'context-new': | |
|
499 | after.append(line) | |
|
500 | ||
|
501 | result.lines.extend( | |
|
502 | self.parse_lines(before, after, source_file, target_file)) | |
|
503 | result.unified = self.as_unified(result.lines) | |
|
504 | result.sideside = result.lines | |
|
505 | return result | |
|
506 | ||
|
507 | def parse_lines(self, before_lines, after_lines, source_file, target_file): | |
|
508 | # TODO: dan: investigate doing the diff comparison and fast highlighting

509 | # on the entire buffered before and after blocks rather than line by

510 | # line; that way we can get better 'fast' highlighting if the context

511 | # allows it - e.g.
|
512 | # line 4: """ | |
|
513 | # line 5: this gets highlighted as a string | |
|
514 | # line 6: """ | |
|
515 | ||
|
516 | lines = [] | |
|
517 | while before_lines or after_lines: | |
|
518 | before, after = None, None | |
|
519 | before_tokens, after_tokens = None, None | |
|
520 | ||
|
521 | if before_lines: | |
|
522 | before = before_lines.pop(0) | |
|
523 | if after_lines: | |
|
524 | after = after_lines.pop(0) | |
|
525 | ||
|
526 | original = AttributeDict() | |
|
527 | modified = AttributeDict() | |
|
528 | ||
|
529 | if before: | |
|
530 | before_tokens = self.get_line_tokens( | |
|
531 | line_text=before['line'], line_number=before['old_lineno'], | |
|
532 | file=source_file) | |
|
533 | original.lineno = before['old_lineno'] | |
|
534 | original.content = before['line'] | |
|
535 | original.action = self.action_to_op(before['action']) | |
|
536 | ||
|
537 | if after: | |
|
538 | after_tokens = self.get_line_tokens( | |
|
539 | line_text=after['line'], line_number=after['new_lineno'], | |
|
540 | file=target_file) | |
|
541 | modified.lineno = after['new_lineno'] | |
|
542 | modified.content = after['line'] | |
|
543 | modified.action = self.action_to_op(after['action']) | |
|
544 | ||
|
545 | ||
|
546 | # diff the lines | |
|
547 | if before_tokens and after_tokens: | |
|
548 | o_tokens, m_tokens, similarity = tokens_diff(before_tokens, after_tokens) | |
|
549 | original.content = render_tokenstream(o_tokens) | |
|
550 | modified.content = render_tokenstream(m_tokens) | |
|
551 | elif before_tokens: | |
|
552 | original.content = render_tokenstream( | |
|
553 | [(x[0], '', x[1]) for x in before_tokens]) | |
|
554 | elif after_tokens: | |
|
555 | modified.content = render_tokenstream( | |
|
556 | [(x[0], '', x[1]) for x in after_tokens]) | |
|
557 | ||
|
558 | lines.append(AttributeDict({ | |
|
559 | 'original': original, | |
|
560 | 'modified': modified, | |
|
561 | })) | |
|
562 | ||
|
563 | return lines | |
|
564 | ||
|
565 | def get_line_tokens(self, line_text, line_number, file=None): | |
|
566 | filenode = None | |
|
567 | filename = None | |
|
568 | ||
|
569 | if isinstance(file, basestring): | |
|
570 | filename = file | |
|
571 | elif isinstance(file, FileNode): | |
|
572 | filenode = file | |
|
573 | filename = file.unicode_path | |
|
574 | ||
|
575 | if self.highlight_mode == self.HL_REAL and filenode: | |
|
576 | if line_number and file.size < self.max_file_size_limit: | |
|
577 | return self.get_tokenized_filenode_line(file, line_number) | |
|
578 | ||
|
579 | if self.highlight_mode in (self.HL_REAL, self.HL_FAST) and filename: | |
|
580 | lexer = self._get_lexer_for_filename(filename) | |
|
581 | return list(tokenize_string(line_text, lexer)) | |
|
582 | ||
|
583 | return list(tokenize_string(line_text, plain_text_lexer)) | |
|
584 | ||
|
585 | def get_tokenized_filenode_line(self, filenode, line_number): | |
|
586 | ||
|
587 | if filenode not in self.highlighted_filenodes: | |
|
588 | tokenized_lines = filenode_as_lines_tokens(filenode, filenode.lexer) | |
|
589 | self.highlighted_filenodes[filenode] = tokenized_lines | |
|
590 | return self.highlighted_filenodes[filenode][line_number - 1] | |
|
591 | ||
|
592 | def action_to_op(self, action): | |
|
593 | return { | |
|
594 | 'add': '+', | |
|
595 | 'del': '-', | |
|
596 | 'unmod': ' ', | |
|
597 | 'context-old': ' ', | |
|
598 | 'context-new': ' ', | |
|
599 | }.get(action, action) | |
|
600 | ||
|
601 | def as_unified(self, lines): | |
|
602 | """ Return a generator that yields the lines of a diff in unified order """ | |
|
603 | def generator(): | |
|
604 | buf = [] | |
|
605 | for line in lines: | |
|
606 | ||
|
607 | if buf and (not line.original or line.original.action == ' '):
|
608 | for b in buf: | |
|
609 | yield b | |
|
610 | buf = [] | |
|
611 | ||
|
612 | if line.original: | |
|
613 | if line.original.action == ' ': | |
|
614 | yield (line.original.lineno, line.modified.lineno, | |
|
615 | line.original.action, line.original.content) | |
|
616 | continue | |
|
617 | ||
|
618 | if line.original.action == '-': | |
|
619 | yield (line.original.lineno, None, | |
|
620 | line.original.action, line.original.content) | |
|
621 | ||
|
622 | if line.modified.action == '+': | |
|
623 | buf.append(( | |
|
624 | None, line.modified.lineno, | |
|
625 | line.modified.action, line.modified.content)) | |
|
626 | continue | |
|
627 | ||
|
628 | if line.modified: | |
|
629 | yield (None, line.modified.lineno, | |
|
630 | line.modified.action, line.modified.content) | |
|
631 | ||
|
632 | for b in buf: | |
|
633 | yield b | |
|
634 | ||
|
635 | return generator() |
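as_unified therefore yields (old_lineno, new_lineno, action, content) tuples, buffering '+' lines so that for a replace block all deletions are emitted before the matching insertions. The shape of the output, with invented values (hunkbit is a parse_hunk result as used in render_patch above):

    for old_no, new_no, action, content in hunkbit.unified:
        print('%4s %4s %s %s' % (old_no or '', new_no or '', action, content))
    #   11   11     some context line
    #   12        - removed line
    #        12  + added line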
@@ -1,886 +1,1161 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2011-2016 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | |
|
22 | 22 | """ |
|
23 | 23 | Set of diffing helpers, previously part of vcs |
|
24 | 24 | """ |
|
25 | 25 | |
|
26 | 26 | import collections |
|
27 | 27 | import re |
|
28 | 28 | import difflib |
|
29 | 29 | import logging |
|
30 | 30 | |
|
31 | 31 | from itertools import tee, imap |
|
32 | 32 | |
|
33 | 33 | from pylons.i18n.translation import _ |
|
34 | 34 | |
|
35 | 35 | from rhodecode.lib.vcs.exceptions import VCSError |
|
36 | 36 | from rhodecode.lib.vcs.nodes import FileNode, SubModuleNode |
|
37 | 37 | from rhodecode.lib.vcs.backends.base import EmptyCommit |
|
38 | 38 | from rhodecode.lib.helpers import escape |
|
39 | 39 | from rhodecode.lib.utils2 import safe_unicode |
|
40 | 40 | |
|
41 | 41 | log = logging.getLogger(__name__) |
|
42 | 42 | |
|
43 | 43 | # define max context, a file with more than this numbers of lines is unusable |
|
44 | 44 | # in browser anyway |
|
45 | 45 | MAX_CONTEXT = 1024 * 1014 |
|
46 | 46 | |
|
47 | 47 | |
|
48 | 48 | class OPS(object): |
|
49 | 49 | ADD = 'A' |
|
50 | 50 | MOD = 'M' |
|
51 | 51 | DEL = 'D' |
|
52 | 52 | |
|
53 | 53 | |
|
54 | 54 | def wrap_to_table(str_): |
|
55 | 55 | return '''<table class="code-difftable"> |
|
56 | 56 | <tr class="line no-comment"> |
|
57 | 57 | <td class="add-comment-line tooltip" title="%s"><span class="add-comment-content"></span></td> |
|
58 | 58 | <td></td> |
|
59 | 59 | <td class="lineno new"></td> |
|
60 | 60 | <td class="code no-comment"><pre>%s</pre></td> |
|
61 | 61 | </tr> |
|
62 | 62 | </table>''' % (_('Click to comment'), str_) |
|
63 | 63 | |
|
64 | 64 | |
|
65 | 65 | def wrapped_diff(filenode_old, filenode_new, diff_limit=None, file_limit=None, |
|
66 | 66 | show_full_diff=False, ignore_whitespace=True, line_context=3, |
|
67 | 67 | enable_comments=False): |
|
68 | 68 | """ |
|
69 | 69 | returns a wrapped diff into a table, checks for cut_off_limit for file and |
|
70 | 70 | whole diff and presents proper message |
|
71 | 71 | """ |
|
72 | 72 | |
|
73 | 73 | if filenode_old is None: |
|
74 | 74 | filenode_old = FileNode(filenode_new.path, '', EmptyCommit()) |
|
75 | 75 | |
|
76 | 76 | if filenode_old.is_binary or filenode_new.is_binary: |
|
77 | 77 | diff = wrap_to_table(_('Binary file')) |
|
78 | 78 | stats = None |
|
79 | 79 | size = 0 |
|
80 | 80 | data = None |
|
81 | 81 | |
|
82 | 82 | elif diff_limit != -1 and (diff_limit is None or |
|
83 | 83 | (filenode_old.size < diff_limit and filenode_new.size < diff_limit)): |
|
84 | 84 | |
|
85 | 85 | f_gitdiff = get_gitdiff(filenode_old, filenode_new, |
|
86 | 86 | ignore_whitespace=ignore_whitespace, |
|
87 | 87 | context=line_context) |
|
88 | 88 | diff_processor = DiffProcessor( |
|
89 | 89 | f_gitdiff, format='gitdiff', diff_limit=diff_limit, |
|
90 | 90 | file_limit=file_limit, show_full_diff=show_full_diff) |
|
91 | 91 | _parsed = diff_processor.prepare() |
|
92 | 92 | |
|
93 | 93 | diff = diff_processor.as_html(enable_comments=enable_comments) |
|
94 | 94 | stats = _parsed[0]['stats'] if _parsed else None |
|
95 | 95 | size = len(diff or '') |
|
96 | 96 | data = _parsed[0] if _parsed else None |
|
97 | 97 | else: |
|
98 | 98 | diff = wrap_to_table(_('Changeset was too big and was cut off, use ' |
|
99 | 99 | 'diff menu to display this diff')) |
|
100 | 100 | stats = None |
|
101 | 101 | size = 0 |
|
102 | 102 | data = None |
|
103 | 103 | if not diff: |
|
104 | 104 | submodules = filter(lambda o: isinstance(o, SubModuleNode), |
|
105 | 105 | [filenode_new, filenode_old]) |
|
106 | 106 | if submodules: |
|
107 | 107 | diff = wrap_to_table(escape('Submodule %r' % submodules[0])) |
|
108 | 108 | else: |
|
109 | 109 | diff = wrap_to_table(_('No changes detected')) |
|
110 | 110 | |
|
111 | 111 | cs1 = filenode_old.commit.raw_id |
|
112 | 112 | cs2 = filenode_new.commit.raw_id |
|
113 | 113 | |
|
114 | 114 | return size, cs1, cs2, diff, stats, data |
|
115 | 115 | |
|
116 | 116 | |
|
117 | 117 | def get_gitdiff(filenode_old, filenode_new, ignore_whitespace=True, context=3): |
|
118 | 118 | """ |
|
119 | 119 | Returns git style diff between given ``filenode_old`` and ``filenode_new``. |
|
120 | 120 | |
|
121 | 121 | :param ignore_whitespace: ignore whitespaces in diff |
|
122 | 122 | """ |
|
123 | 123 | # make sure we pass in default context |
|
124 | 124 | context = context or 3 |
|
125 | 125 | # protect against IntOverflow when passing HUGE context |
|
126 | 126 | if context > MAX_CONTEXT: |
|
127 | 127 | context = MAX_CONTEXT |
|
128 | 128 | |
|
129 | 129 | submodules = filter(lambda o: isinstance(o, SubModuleNode), |
|
130 | 130 | [filenode_new, filenode_old]) |
|
131 | 131 | if submodules: |
|
132 | 132 | return '' |
|
133 | 133 | |
|
134 | 134 | for filenode in (filenode_old, filenode_new): |
|
135 | 135 | if not isinstance(filenode, FileNode): |
|
136 | 136 | raise VCSError( |
|
137 | 137 | "Given object should be FileNode object, not %s" |
|
138 | 138 | % filenode.__class__) |
|
139 | 139 | |
|
140 | 140 | repo = filenode_new.commit.repository |
|
141 | 141 | old_commit = filenode_old.commit or repo.EMPTY_COMMIT |
|
142 | 142 | new_commit = filenode_new.commit |
|
143 | 143 | |
|
144 | 144 | vcs_gitdiff = repo.get_diff( |
|
145 | 145 | old_commit, new_commit, filenode_new.path, |
|
146 | 146 | ignore_whitespace, context, path1=filenode_old.path) |
|
147 | 147 | return vcs_gitdiff |
|
148 | 148 | |
|
149 | 149 | NEW_FILENODE = 1 |
|
150 | 150 | DEL_FILENODE = 2 |
|
151 | 151 | MOD_FILENODE = 3 |
|
152 | 152 | RENAMED_FILENODE = 4 |
|
153 | 153 | COPIED_FILENODE = 5 |
|
154 | 154 | CHMOD_FILENODE = 6 |
|
155 | 155 | BIN_FILENODE = 7 |
|
156 | 156 | |
|
157 | 157 | |
|
158 | 158 | class LimitedDiffContainer(object): |
|
159 | 159 | |
|
160 | 160 | def __init__(self, diff_limit, cur_diff_size, diff): |
|
161 | 161 | self.diff = diff |
|
162 | 162 | self.diff_limit = diff_limit |
|
163 | 163 | self.cur_diff_size = cur_diff_size |
|
164 | 164 | |
|
165 | 165 | def __getitem__(self, key): |
|
166 | 166 | return self.diff.__getitem__(key) |
|
167 | 167 | |
|
168 | 168 | def __iter__(self): |
|
169 | 169 | for l in self.diff: |
|
170 | 170 | yield l |
|
171 | 171 | |
|
172 | 172 | |
|
173 | 173 | class Action(object): |
|
174 | 174 | """ |
|
175 | 175 | Contains constants for the action value of the lines in a parsed diff. |
|
176 | 176 | """ |
|
177 | 177 | |
|
178 | 178 | ADD = 'add' |
|
179 | 179 | DELETE = 'del' |
|
180 | 180 | UNMODIFIED = 'unmod' |
|
181 | 181 | |
|
182 | 182 | CONTEXT = 'context' |
|
183 | CONTEXT_OLD = 'context-old' | |
|
184 | CONTEXT_NEW = 'context-new' | |
|
183 | 185 | |
|
184 | 186 | |
|
185 | 187 | class DiffProcessor(object): |
|
186 | 188 | """ |
|
187 | 189 | Give it a unified or git diff and it returns a list of the files that were |
|
188 | 190 | mentioned in the diff together with a dict of meta information that |
|
189 | 191 | can be used to render it in a HTML template. |
|
190 | 192 | |
|
191 | 193 | .. note:: Unicode handling |
|
192 | 194 | |
|
193 | 195 | The original diffs are a byte sequence and can contain filenames |
|
194 | 196 | in mixed encodings. This class generally returns `unicode` objects |
|
195 | 197 | since the result is intended for presentation to the user. |
|
196 | 198 | |
|
197 | 199 | """ |
|
198 | 200 | _chunk_re = re.compile(r'^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@(.*)') |
|
199 | 201 | _newline_marker = re.compile(r'^\\ No newline at end of file') |
|
200 | 202 | |
|
201 | 203 | # used for inline highlighter word split |
|
202 | 204 | _token_re = re.compile(r'()(>|<|&|\W+?)') |
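The hunk header regex captures the start/length pair for each side plus the trailing section header, e.g.:

    m = DiffProcessor._chunk_re.match('@@ -12,5 +12,6 @@ def foo():')
    print(m.groups())  # ('12', '5', '12', '6', ' def foo():')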
|
203 | 205 | |
|
204 | 206 | def __init__(self, diff, format='gitdiff', diff_limit=None, |
|
205 | 207 | file_limit=None, show_full_diff=True): |
|
206 | 208 | """ |
|
207 | 209 | :param diff: A `Diff` object representing a diff from a vcs backend |
|
208 | 210 | :param format: format of diff passed, `udiff` or `gitdiff` |
|
209 | 211 | :param diff_limit: define the size of diff that is considered "big" |
|
210 | 212 | based on that parameter cut off will be triggered, set to None |
|
211 | 213 | to show full diff |
|
212 | 214 | """ |
|
213 | 215 | self._diff = diff |
|
214 | 216 | self._format = format |
|
215 | 217 | self.adds = 0 |
|
216 | 218 | self.removes = 0 |
|
217 | 219 | # calculate diff size |
|
218 | 220 | self.diff_limit = diff_limit |
|
219 | 221 | self.file_limit = file_limit |
|
220 | 222 | self.show_full_diff = show_full_diff |
|
221 | 223 | self.cur_diff_size = 0 |
|
222 | 224 | self.parsed = False |
|
223 | 225 | self.parsed_diff = [] |
|
224 | 226 | |
|
225 | 227 | if format == 'gitdiff': |
|
226 | 228 | self.differ = self._highlight_line_difflib |
|
227 | 229 | self._parser = self._parse_gitdiff |
|
228 | 230 | else: |
|
229 | 231 | self.differ = self._highlight_line_udiff |
|
230 |
self._parser = self._parse_ |
|
|
232 | self._parser = self._new_parse_gitdiff | |
|
231 | 233 | |
|
232 | 234 | def _copy_iterator(self): |
|
233 | 235 | """ |
|
234 | 236 | make a fresh copy of generator, we should not iterate thru |
|
235 | 237 | an original as it's needed for repeating operations on |
|
236 | 238 | this instance of DiffProcessor |
|
237 | 239 | """ |
|
238 | 240 | self.__udiff, iterator_copy = tee(self.__udiff) |
|
239 | 241 | return iterator_copy |
|
240 | 242 | |
|
241 | 243 | def _escaper(self, string): |
|
242 | 244 | """ |
|
243 | 245 | Escaper for diff escapes special chars and checks the diff limit |
|
244 | 246 | |
|
245 | 247 | :param string: |
|
246 | 248 | """ |
|
247 | 249 | |
|
248 | 250 | self.cur_diff_size += len(string) |
|
249 | 251 | |
|
250 | 252 | if not self.show_full_diff and (self.cur_diff_size > self.diff_limit): |
|
251 | 253 | raise DiffLimitExceeded('Diff Limit Exceeded') |
|
252 | 254 | |
|
253 | 255 | return safe_unicode(string)\ |
|
254 | 256 | .replace('&', '&')\ |
|
255 | 257 | .replace('<', '<')\ |
|
256 | 258 | .replace('>', '>') |
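Note the replacement order: '&' is escaped first, so the entities produced for '<' and '>' are not themselves re-escaped. For example, _escaper('<a&b>') returns u'&lt;a&amp;b&gt;' (assuming the diff limit is not exceeded).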
|
257 | 259 | |
|
258 | 260 | def _line_counter(self, l): |
|
259 | 261 | """ |
|
260 | 262 | Checks each line and bumps total adds/removes for this diff |
|
261 | 263 | |
|
262 | 264 | :param l: |
|
263 | 265 | """ |
|
264 | 266 | if l.startswith('+') and not l.startswith('+++'): |
|
265 | 267 | self.adds += 1 |
|
266 | 268 | elif l.startswith('-') and not l.startswith('---'): |
|
267 | 269 | self.removes += 1 |
|
268 | 270 | return safe_unicode(l) |
|
269 | 271 | |
|
270 | 272 | def _highlight_line_difflib(self, line, next_): |
|
271 | 273 | """ |
|
272 | 274 | Highlight inline changes in both lines. |
|
273 | 275 | """ |
|
274 | 276 | |
|
275 | 277 | if line['action'] == Action.DELETE: |
|
276 | 278 | old, new = line, next_ |
|
277 | 279 | else: |
|
278 | 280 | old, new = next_, line |
|
279 | 281 | |
|
280 | 282 | oldwords = self._token_re.split(old['line']) |
|
281 | 283 | newwords = self._token_re.split(new['line']) |
|
282 | 284 | sequence = difflib.SequenceMatcher(None, oldwords, newwords) |
|
283 | 285 | |
|
284 | 286 | oldfragments, newfragments = [], [] |
|
285 | 287 | for tag, i1, i2, j1, j2 in sequence.get_opcodes(): |
|
286 | 288 | oldfrag = ''.join(oldwords[i1:i2]) |
|
287 | 289 | newfrag = ''.join(newwords[j1:j2]) |
|
288 | 290 | if tag != 'equal': |
|
289 | 291 | if oldfrag: |
|
290 | 292 | oldfrag = '<del>%s</del>' % oldfrag |
|
291 | 293 | if newfrag: |
|
292 | 294 | newfrag = '<ins>%s</ins>' % newfrag |
|
293 | 295 | oldfragments.append(oldfrag) |
|
294 | 296 | newfragments.append(newfrag) |
|
295 | 297 | |
|
296 | 298 | old['line'] = "".join(oldfragments) |
|
297 | 299 | new['line'] = "".join(newfragments) |
|
298 | 300 | |
|
299 | 301 | def _highlight_line_udiff(self, line, next_): |
|
300 | 302 | """ |
|
301 | 303 | Highlight inline changes in both lines. |
|
302 | 304 | """ |
|
303 | 305 | start = 0 |
|
304 | 306 | limit = min(len(line['line']), len(next_['line'])) |
|
305 | 307 | while start < limit and line['line'][start] == next_['line'][start]: |
|
306 | 308 | start += 1 |
|
307 | 309 | end = -1 |
|
308 | 310 | limit -= start |
|
309 | 311 | while -end <= limit and line['line'][end] == next_['line'][end]: |
|
310 | 312 | end -= 1 |
|
311 | 313 | end += 1 |
|
312 | 314 | if start or end: |
|
313 | 315 | def do(l): |
|
314 | 316 | last = end + len(l['line']) |
|
315 | 317 | if l['action'] == Action.ADD: |
|
316 | 318 | tag = 'ins' |
|
317 | 319 | else: |
|
318 | 320 | tag = 'del' |
|
319 | 321 | l['line'] = '%s<%s>%s</%s>%s' % ( |
|
320 | 322 | l['line'][:start], |
|
321 | 323 | tag, |
|
322 | 324 | l['line'][start:last], |
|
323 | 325 | tag, |
|
324 | 326 | l['line'][last:] |
|
325 | 327 | ) |
|
326 | 328 | do(line) |
|
327 | 329 | do(next_) |
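Worked through on a small pair: the common prefix 'foo ' gives start=4, the common suffix ' baz' gives end=-4, so only the differing middle is wrapped (dp is any DiffProcessor instance):

    line = {'action': Action.DELETE, 'line': 'foo bar baz'}
    next_ = {'action': Action.ADD, 'line': 'foo qux baz'}
    dp._highlight_line_udiff(line, next_)
    print(line['line'])   # foo <del>bar</del> baz
    print(next_['line'])  # foo <ins>qux</ins> baz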
|
328 | 330 | |
|
329 | 331 | def _clean_line(self, line, command): |
|
330 | 332 | if command in ['+', '-', ' ']: |
|
331 | 333 | # only modify the line if it's actually a diff thing |
|
332 | 334 | line = line[1:] |
|
333 | 335 | return line |
|
334 | 336 | |
|
335 | 337 | def _parse_gitdiff(self, inline_diff=True): |
|
336 | 338 | _files = [] |
|
337 | 339 | diff_container = lambda arg: arg |
|
338 | 340 | |
|
339 | 341 | for chunk in self._diff.chunks(): |
|
340 | 342 | head = chunk.header |
|
341 | 343 | |
|
342 | 344 | diff = imap(self._escaper, chunk.diff.splitlines(1)) |
|
343 | 345 | raw_diff = chunk.raw |
|
344 | 346 | limited_diff = False |
|
345 | 347 | exceeds_limit = False |
|
346 | 348 | |
|
347 | 349 | op = None |
|
348 | 350 | stats = { |
|
349 | 351 | 'added': 0, |
|
350 | 352 | 'deleted': 0, |
|
351 | 353 | 'binary': False, |
|
352 | 354 | 'ops': {}, |
|
353 | 355 | } |
|
354 | 356 | |
|
355 | 357 | if head['deleted_file_mode']: |
|
356 | 358 | op = OPS.DEL |
|
357 | 359 | stats['binary'] = True |
|
358 | 360 | stats['ops'][DEL_FILENODE] = 'deleted file' |
|
359 | 361 | |
|
360 | 362 | elif head['new_file_mode']: |
|
361 | 363 | op = OPS.ADD |
|
362 | 364 | stats['binary'] = True |
|
363 | 365 | stats['ops'][NEW_FILENODE] = 'new file %s' % head['new_file_mode'] |
|
364 | 366 | else: # modify operation, can be copy, rename or chmod |
|
365 | 367 | |
|
366 | 368 | # CHMOD |
|
367 | 369 | if head['new_mode'] and head['old_mode']: |
|
368 | 370 | op = OPS.MOD |
|
369 | 371 | stats['binary'] = True |
|
370 | 372 | stats['ops'][CHMOD_FILENODE] = ( |
|
371 | 373 | 'modified file chmod %s => %s' % ( |
|
372 | 374 | head['old_mode'], head['new_mode'])) |
|
373 | 375 | # RENAME |
|
374 | 376 | if head['rename_from'] != head['rename_to']: |
|
375 | 377 | op = OPS.MOD |
|
376 | 378 | stats['binary'] = True |
|
377 | 379 | stats['ops'][RENAMED_FILENODE] = ( |
|
378 | 380 | 'file renamed from %s to %s' % ( |
|
379 | 381 | head['rename_from'], head['rename_to'])) |
|
380 | 382 | # COPY |
|
381 | 383 | if head.get('copy_from') and head.get('copy_to'): |
|
382 | 384 | op = OPS.MOD |
|
383 | 385 | stats['binary'] = True |
|
384 | 386 | stats['ops'][COPIED_FILENODE] = ( |
|
385 | 387 | 'file copied from %s to %s' % ( |
|
386 | 388 | head['copy_from'], head['copy_to'])) |
|
387 | 389 | |
|
388 | 390 | # If our new parsed headers didn't match anything fallback to |
|
389 | 391 | # old style detection |
|
390 | 392 | if op is None: |
|
391 | 393 | if not head['a_file'] and head['b_file']: |
|
392 | 394 | op = OPS.ADD |
|
393 | 395 | stats['binary'] = True |
|
394 | 396 | stats['ops'][NEW_FILENODE] = 'new file' |
|
395 | 397 | |
|
396 | 398 | elif head['a_file'] and not head['b_file']: |
|
397 | 399 | op = OPS.DEL |
|
398 | 400 | stats['binary'] = True |
|
399 | 401 | stats['ops'][DEL_FILENODE] = 'deleted file' |
|
400 | 402 | |
|
401 | 403 | # it's not ADD not DELETE |
|
402 | 404 | if op is None: |
|
403 | 405 | op = OPS.MOD |
|
404 | 406 | stats['binary'] = True |
|
405 | 407 | stats['ops'][MOD_FILENODE] = 'modified file' |
|
406 | 408 | |
|
407 | 409 | # a real non-binary diff |
|
408 | 410 | if head['a_file'] or head['b_file']: |
|
409 | 411 | try: |
|
410 | 412 | raw_diff, chunks, _stats = self._parse_lines(diff) |
|
411 | 413 | stats['binary'] = False |
|
412 | 414 | stats['added'] = _stats[0] |
|
413 | 415 | stats['deleted'] = _stats[1] |
|
414 | 416 | # explicit mark that it's a modified file |
|
415 | 417 | if op == OPS.MOD: |
|
416 | 418 | stats['ops'][MOD_FILENODE] = 'modified file' |
|
417 | 419 | exceeds_limit = len(raw_diff) > self.file_limit |
|
418 | 420 | |
|
419 | 421 | # changed from _escaper function so we validate size of |
|
420 | 422 | # each file instead of the whole diff |
|
421 | 423 | # diff will hide big files but still show small ones |
|
422 | 424 | # from my tests, big files are fairly safe to be parsed |
|
423 | 425 | # but the browser is the bottleneck |
|
424 | 426 | if not self.show_full_diff and exceeds_limit: |
|
425 | 427 | raise DiffLimitExceeded('File Limit Exceeded') |
|
426 | 428 | |
|
427 | 429 | except DiffLimitExceeded: |
|
428 | 430 | diff_container = lambda _diff: \ |
|
429 | 431 | LimitedDiffContainer( |
|
430 | 432 | self.diff_limit, self.cur_diff_size, _diff) |
|
431 | 433 | |
|
432 | 434 | exceeds_limit = len(raw_diff) > self.file_limit |
|
433 | 435 | limited_diff = True |
|
434 | 436 | chunks = [] |
|
435 | 437 | |
|
436 | 438 | else: # GIT format binary patch, or possibly empty diff |
|
437 | 439 | if head['bin_patch']: |
|
438 | 440 | # we have operation already extracted, but we mark simply |
|
439 | 441 | # it's a diff we wont show for binary files |
|
440 | 442 | stats['ops'][BIN_FILENODE] = 'binary diff hidden' |
|
441 | 443 | chunks = [] |
|
442 | 444 | |
|
443 | 445 | if chunks and not self.show_full_diff and op == OPS.DEL: |
|
444 | 446 | # if not full diff mode show deleted file contents |
|
445 | 447 | # TODO: anderson: if the view is not too big, there is no way |
|
446 | 448 | # to see the content of the file |
|
447 | 449 | chunks = [] |
|
448 | 450 | |
|
449 | 451 | chunks.insert(0, [{ |
|
450 | 452 | 'old_lineno': '', |
|
451 | 453 | 'new_lineno': '', |
|
452 | 454 | 'action': Action.CONTEXT, |
|
453 | 455 | 'line': msg, |
|
454 | 456 | } for _op, msg in stats['ops'].iteritems() |
|
455 | 457 | if _op not in [MOD_FILENODE]]) |
|
456 | 458 | |
|
457 | 459 | _files.append({ |
|
458 | 460 | 'filename': safe_unicode(head['b_path']), |
|
459 | 461 | 'old_revision': head['a_blob_id'], |
|
460 | 462 | 'new_revision': head['b_blob_id'], |
|
461 | 463 | 'chunks': chunks, |
|
462 | 464 | 'raw_diff': safe_unicode(raw_diff), |
|
463 | 465 | 'operation': op, |
|
464 | 466 | 'stats': stats, |
|
465 | 467 | 'exceeds_limit': exceeds_limit, |
|
466 | 468 | 'is_limited_diff': limited_diff, |
|
467 | 469 | }) |
|
468 | 470 | |
|
469 | 471 | sorter = lambda info: {OPS.ADD: 0, OPS.MOD: 1, |
|
470 | 472 | OPS.DEL: 2}.get(info['operation']) |
|
471 | 473 | |
|
472 | 474 | if not inline_diff: |
|
473 | 475 | return diff_container(sorted(_files, key=sorter)) |
|
474 | 476 | |
|
475 | 477 | # highlight inline changes |
|
476 | 478 | for diff_data in _files: |
|
477 | 479 | for chunk in diff_data['chunks']: |
|
478 | 480 | lineiter = iter(chunk) |
|
479 | 481 | try: |
|
480 | 482 | while 1: |
|
481 | 483 | line = lineiter.next() |
|
482 | 484 | if line['action'] not in ( |
|
483 | 485 | Action.UNMODIFIED, Action.CONTEXT): |
|
484 | 486 | nextline = lineiter.next() |
|
485 | 487 | if nextline['action'] in ['unmod', 'context'] or \ |
|
486 | 488 | nextline['action'] == line['action']: |
|
487 | 489 | continue |
|
488 | 490 | self.differ(line, nextline) |
|
489 | 491 | except StopIteration: |
|
490 | 492 | pass |
|
491 | 493 | |
|
492 | 494 | return diff_container(sorted(_files, key=sorter)) |
|
493 | 495 | |
|
494 | def _parse_udiff(self, inline_diff=True): | |
|
495 | raise NotImplementedError() | |
|
496 | ||
|
497 | # FIXME: NEWDIFFS: dan: this replaces the old _escaper function | |
|
498 | def _process_line(self, string): | |
|
499 | """ | |
|
500 | Process a diff line, checks the diff limit | |
|
501 | ||
|
502 | :param string: | |
|
503 | """ | |
|
504 | ||
|
505 | self.cur_diff_size += len(string) | |
|
506 | ||
|
507 | if not self.show_full_diff and (self.cur_diff_size > self.diff_limit): | |
|
508 | raise DiffLimitExceeded('Diff Limit Exceeded') | |
|
509 | ||
|
510 | return safe_unicode(string) | |
|
511 | ||
|
512 | # FIXME: NEWDIFFS: dan: this replaces _parse_gitdiff | |
|
513 | def _new_parse_gitdiff(self, inline_diff=True): | |
|
514 | _files = [] | |
|
515 | diff_container = lambda arg: arg | |
|
516 | for chunk in self._diff.chunks(): | |
|
517 | head = chunk.header | |
|
518 | log.debug('parsing diff %r', head)
|
519 | ||
|
520 | diff = imap(self._process_line, chunk.diff.splitlines(1)) | |
|
521 | raw_diff = chunk.raw | |
|
522 | limited_diff = False | |
|
523 | exceeds_limit = False | |
|
524 | # if 'empty_file_to_modify_and_rename' in head['a_path']: | |
|
525 | # 1/0 | |
|
526 | op = None | |
|
527 | stats = { | |
|
528 | 'added': 0, | |
|
529 | 'deleted': 0, | |
|
530 | 'binary': False, | |
|
531 | 'old_mode': None, | |
|
532 | 'new_mode': None, | |
|
533 | 'ops': {}, | |
|
534 | } | |
|
535 | if head['old_mode']: | |
|
536 | stats['old_mode'] = head['old_mode'] | |
|
537 | if head['new_mode']: | |
|
538 | stats['new_mode'] = head['new_mode'] | |
|
539 | if head['b_mode']: | |
|
540 | stats['new_mode'] = head['b_mode'] | |
|
541 | ||
|
542 | if head['deleted_file_mode']: | |
|
543 | op = OPS.DEL | |
|
544 | stats['binary'] = True | |
|
545 | stats['ops'][DEL_FILENODE] = 'deleted file' | |
|
546 | ||
|
547 | elif head['new_file_mode']: | |
|
548 | op = OPS.ADD | |
|
549 | stats['binary'] = True | |
|
550 | stats['old_mode'] = None | |
|
551 | stats['new_mode'] = head['new_file_mode'] | |
|
552 | stats['ops'][NEW_FILENODE] = 'new file %s' % head['new_file_mode'] | |
|
553 | else: # modify operation, can be copy, rename or chmod | |
|
554 | ||
|
555 | # CHMOD | |
|
556 | if head['new_mode'] and head['old_mode']: | |
|
557 | op = OPS.MOD | |
|
558 | stats['binary'] = True | |
|
559 | stats['ops'][CHMOD_FILENODE] = ( | |
|
560 | 'modified file chmod %s => %s' % ( | |
|
561 | head['old_mode'], head['new_mode'])) | |
|
562 | ||
|
563 | # RENAME | |
|
564 | if head['rename_from'] != head['rename_to']: | |
|
565 | op = OPS.MOD | |
|
566 | stats['binary'] = True | |
|
567 | stats['renamed'] = (head['rename_from'], head['rename_to']) | |
|
568 | stats['ops'][RENAMED_FILENODE] = ( | |
|
569 | 'file renamed from %s to %s' % ( | |
|
570 | head['rename_from'], head['rename_to'])) | |
|
571 | # COPY | |
|
572 | if head.get('copy_from') and head.get('copy_to'): | |
|
573 | op = OPS.MOD | |
|
574 | stats['binary'] = True | |
|
575 | stats['copied'] = (head['copy_from'], head['copy_to']) | |
|
576 | stats['ops'][COPIED_FILENODE] = ( | |
|
577 | 'file copied from %s to %s' % ( | |
|
578 | head['copy_from'], head['copy_to'])) | |
|
496 | 579 | |
|
580 | # If our new parsed headers didn't match anything fallback to | |
|
581 | # old style detection | |
|
582 | if op is None: | |
|
583 | if not head['a_file'] and head['b_file']: | |
|
584 | op = OPS.ADD | |
|
585 | stats['binary'] = True | |
|
586 | stats['new_file'] = True | |
|
587 | stats['ops'][NEW_FILENODE] = 'new file' | |
|
588 | ||
|
589 | elif head['a_file'] and not head['b_file']: | |
|
590 | op = OPS.DEL | |
|
591 | stats['binary'] = True | |
|
592 | stats['ops'][DEL_FILENODE] = 'deleted file' | |
|
593 | ||
|
594 | # it's not ADD not DELETE | |
|
595 | if op is None: | |
|
596 | op = OPS.MOD | |
|
597 | stats['binary'] = True | |
|
598 | stats['ops'][MOD_FILENODE] = 'modified file' | |
|
599 | ||
|
600 | # a real non-binary diff | |
|
601 | if head['a_file'] or head['b_file']: | |
|
602 | try: | |
|
603 | raw_diff, chunks, _stats = self._new_parse_lines(diff) | |
|
604 | stats['binary'] = False | |
|
605 | stats['added'] = _stats[0] | |
|
606 | stats['deleted'] = _stats[1] | |
|
607 | # explicit mark that it's a modified file | |
|
608 | if op == OPS.MOD: | |
|
609 | stats['ops'][MOD_FILENODE] = 'modified file' | |
|
610 | exceeds_limit = len(raw_diff) > self.file_limit | |
|
611 | ||
|
612 | # changed from _escaper function so we validate size of | |
|
613 | # each file instead of the whole diff | |
|
614 | # diff will hide big files but still show small ones | |
|
615 | # from my tests, big files are fairly safe to be parsed | |
|
616 | # but the browser is the bottleneck | |
|
617 | if not self.show_full_diff and exceeds_limit: | |
|
618 | raise DiffLimitExceeded('File Limit Exceeded') | |
|
619 | ||
|
620 | except DiffLimitExceeded: | |
|
621 | diff_container = lambda _diff: \ | |
|
622 | LimitedDiffContainer( | |
|
623 | self.diff_limit, self.cur_diff_size, _diff) | |
|
624 | ||
|
625 | exceeds_limit = len(raw_diff) > self.file_limit | |
|
626 | limited_diff = True | |
|
627 | chunks = [] | |
|
628 | ||
|
629 | else: # GIT format binary patch, or possibly empty diff | |
|
630 | if head['bin_patch']: | |
|
631 | # we have operation already extracted, but we mark simply | |
|
632 | # it's a diff we wont show for binary files | |
|
633 | stats['ops'][BIN_FILENODE] = 'binary diff hidden' | |
|
634 | chunks = [] | |
|
635 | ||
|
636 | if chunks and not self.show_full_diff and op == OPS.DEL: | |
|
637 | # if not full diff mode show deleted file contents | |
|
638 | # TODO: anderson: if the view is not too big, there is no way | |
|
639 | # to see the content of the file | |
|
640 | chunks = [] | |
|
641 | ||
|
642 | chunks.insert(0, [{ | |
|
643 | 'old_lineno': '', | |
|
644 | 'new_lineno': '', | |
|
645 | 'action': Action.CONTEXT, | |
|
646 | 'line': msg, | |
|
647 | } for _op, msg in stats['ops'].iteritems() | |
|
648 | if _op not in [MOD_FILENODE]]) | |
|
649 | ||
|
650 | original_filename = safe_unicode(head['a_path']) | |
|
651 | _files.append({ | |
|
652 | 'original_filename': original_filename, | |
|
653 | 'filename': safe_unicode(head['b_path']), | |
|
654 | 'old_revision': head['a_blob_id'], | |
|
655 | 'new_revision': head['b_blob_id'], | |
|
656 | 'chunks': chunks, | |
|
657 | 'raw_diff': safe_unicode(raw_diff), | |
|
658 | 'operation': op, | |
|
659 | 'stats': stats, | |
|
660 | 'exceeds_limit': exceeds_limit, | |
|
661 | 'is_limited_diff': limited_diff, | |
|
662 | }) | |
|
663 | ||
|
664 | ||
|
665 | sorter = lambda info: {OPS.ADD: 0, OPS.MOD: 1, | |
|
666 | OPS.DEL: 2}.get(info['operation']) | |
|
667 | ||
|
668 | return diff_container(sorted(_files, key=sorter)) | |
|
669 | ||
|
670 | # FIXME: NEWDIFFS: dan: this gets replaced by _new_parse_lines | |
|
497 | 671 | def _parse_lines(self, diff): |
|
498 | 672 | """ |
|
499 | 673 | Parse the diff an return data for the template. |
|
500 | 674 | """ |
|
501 | 675 | |
|
502 | 676 | lineiter = iter(diff) |
|
503 | 677 | stats = [0, 0] |
|
504 | 678 | chunks = [] |
|
505 | 679 | raw_diff = [] |
|
506 | 680 | |
|
507 | 681 | try: |
|
508 | 682 | line = lineiter.next() |
|
509 | 683 | |
|
510 | 684 | while line: |
|
511 | 685 | raw_diff.append(line) |
|
512 | 686 | lines = [] |
|
513 | 687 | chunks.append(lines) |
|
514 | 688 | |
|
515 | 689 | match = self._chunk_re.match(line) |
|
516 | 690 | |
|
517 | 691 | if not match: |
|
518 | 692 | break |
|
519 | 693 | |
|
520 | 694 | gr = match.groups() |
|
521 | 695 | (old_line, old_end, |
|
522 | 696 | new_line, new_end) = [int(x or 1) for x in gr[:-1]] |
|
523 | 697 | old_line -= 1 |
|
524 | 698 | new_line -= 1 |
|
525 | 699 | |
|
526 | 700 | context = len(gr) == 5 |
|
527 | 701 | old_end += old_line |
|
528 | 702 | new_end += new_line |
|
529 | 703 | |
|
530 | 704 | if context: |
|
531 | 705 | # skip context only if it's first line |
|
532 | 706 | if int(gr[0]) > 1: |
|
533 | 707 | lines.append({ |
|
534 | 708 | 'old_lineno': '...', |
|
535 | 709 | 'new_lineno': '...', |
|
536 | 710 | 'action': Action.CONTEXT, |
|
537 | 711 | 'line': line, |
|
538 | 712 | }) |
|
539 | 713 | |
|
540 | 714 | line = lineiter.next() |
|
541 | 715 | |
|
542 | 716 | while old_line < old_end or new_line < new_end: |
|
543 | 717 | command = ' ' |
|
544 | 718 | if line: |
|
545 | 719 | command = line[0] |
|
546 | 720 | |
|
547 | 721 | affects_old = affects_new = False |
|
548 | 722 | |
|
549 | 723 | # ignore those if we don't expect them |
|
550 | 724 | if command in '#@': |
|
551 | 725 | continue |
|
552 | 726 | elif command == '+': |
|
553 | 727 | affects_new = True |
|
554 | 728 | action = Action.ADD |
|
555 | 729 | stats[0] += 1 |
|
556 | 730 | elif command == '-': |
|
557 | 731 | affects_old = True |
|
558 | 732 | action = Action.DELETE |
|
559 | 733 | stats[1] += 1 |
|
560 | 734 | else: |
|
561 | 735 | affects_old = affects_new = True |
|
562 | 736 | action = Action.UNMODIFIED |
|
563 | 737 | |
|
564 | 738 | if not self._newline_marker.match(line): |
|
565 | 739 | old_line += affects_old |
|
566 | 740 | new_line += affects_new |
|
567 | 741 | lines.append({ |
|
568 | 742 | 'old_lineno': affects_old and old_line or '', |
|
569 | 743 | 'new_lineno': affects_new and new_line or '', |
|
570 | 744 | 'action': action, |
|
571 | 745 | 'line': self._clean_line(line, command) |
|
572 | 746 | }) |
|
573 | 747 | raw_diff.append(line) |
|
574 | 748 | |
|
575 | 749 | line = lineiter.next() |
|
576 | 750 | |
|
577 | 751 | if self._newline_marker.match(line): |
|
578 | 752 | # we need to append to lines, since this is not |
|
579 | 753 | # counted in the line specs of diff |
|
580 | 754 | lines.append({ |
|
581 | 755 | 'old_lineno': '...', |
|
582 | 756 | 'new_lineno': '...', |
|
583 | 757 | 'action': Action.CONTEXT, |
|
584 | 758 | 'line': self._clean_line(line, command) |
|
585 | 759 | }) |
|
586 | 760 | |
|
587 | 761 | except StopIteration: |
|
588 | 762 | pass |
|
589 | 763 | return ''.join(raw_diff), chunks, stats |
|
590 | 764 | |
|
765 | # FIXME: NEWDIFFS: dan: this replaces _parse_lines | |
|
766 | def _new_parse_lines(self, diff): | |
|
767 | """ | |
|
768 | Parse the diff an return data for the template. | |
|
769 | """ | |
|
770 | ||
|
771 | lineiter = iter(diff) | |
|
772 | stats = [0, 0] | |
|
773 | chunks = [] | |
|
774 | raw_diff = [] | |
|
775 | ||
|
776 | try: | |
|
777 | line = lineiter.next() | |
|
778 | ||
|
779 | while line: | |
|
780 | raw_diff.append(line) | |
|
781 | match = self._chunk_re.match(line) | |
|
782 | ||
|
783 | if not match: | |
|
784 | break | |
|
785 | ||
|
786 | gr = match.groups() | |
|
787 | (old_line, old_end, | |
|
788 | new_line, new_end) = [int(x or 1) for x in gr[:-1]] | |
|
789 | ||
|
790 | lines = [] | |
|
791 | hunk = { | |
|
792 | 'section_header': gr[-1], | |
|
793 | 'source_start': old_line, | |
|
794 | 'source_length': old_end, | |
|
795 | 'target_start': new_line, | |
|
796 | 'target_length': new_end, | |
|
797 | 'lines': lines, | |
|
798 | } | |
|
799 | chunks.append(hunk) | |
|
800 | ||
|
801 | old_line -= 1 | |
|
802 | new_line -= 1 | |
|
803 | ||
|
804 | context = len(gr) == 5 | |
|
805 | old_end += old_line | |
|
806 | new_end += new_line | |
|
807 | ||
|
808 | line = lineiter.next() | |
|
809 | ||
|
810 | while old_line < old_end or new_line < new_end: | |
|
811 | command = ' ' | |
|
812 | if line: | |
|
813 | command = line[0] | |
|
814 | ||
|
815 | affects_old = affects_new = False | |
|
816 | ||
|
817 | # ignore those if we don't expect them | |
|
818 | if command in '#@': | |
|
819 | continue | |
|
820 | elif command == '+': | |
|
821 | affects_new = True | |
|
822 | action = Action.ADD | |
|
823 | stats[0] += 1 | |
|
824 | elif command == '-': | |
|
825 | affects_old = True | |
|
826 | action = Action.DELETE | |
|
827 | stats[1] += 1 | |
|
828 | else: | |
|
829 | affects_old = affects_new = True | |
|
830 | action = Action.UNMODIFIED | |
|
831 | ||
|
832 | if not self._newline_marker.match(line): | |
|
833 | old_line += affects_old | |
|
834 | new_line += affects_new | |
|
835 | lines.append({ | |
|
836 | 'old_lineno': affects_old and old_line or '', | |
|
837 | 'new_lineno': affects_new and new_line or '', | |
|
838 | 'action': action, | |
|
839 | 'line': self._clean_line(line, command) | |
|
840 | }) | |
|
841 | raw_diff.append(line) | |
|
842 | ||
|
843 | line = lineiter.next() | |
|
844 | ||
|
845 | if self._newline_marker.match(line): | |
|
846 | # we need to append to lines, since this is not | |
|
847 | # counted in the line specs of diff | |
|
848 | if affects_old: | |
|
849 | action = Action.CONTEXT_OLD | |
|
850 | elif affects_new: | |
|
851 | action = Action.CONTEXT_NEW | |
|
852 | else: | |
|
853 | raise Exception('invalid context for no newline') | |
|
854 | ||
|
855 | lines.append({ | |
|
856 | 'old_lineno': None, | |
|
857 | 'new_lineno': None, | |
|
858 | 'action': action, | |
|
859 | 'line': self._clean_line(line, command) | |
|
860 | }) | |
|
861 | ||
|
862 | except StopIteration: | |
|
863 | pass | |
|
864 | return ''.join(raw_diff), chunks, stats | |
|
865 | ||
|
591 | 866 | def _safe_id(self, idstring): |
|
592 | 867 | """Make a string safe for including in an id attribute. |
|
593 | 868 | |
|
594 | 869 | The HTML spec says that id attributes 'must begin with |
|
595 | 870 | a letter ([A-Za-z]) and may be followed by any number |
|
596 | 871 | of letters, digits ([0-9]), hyphens ("-"), underscores |
|
597 | 872 | ("_"), colons (":"), and periods (".")'. These regexps |
|
598 | 873 | are slightly over-zealous, in that they remove colons |
|
599 | 874 | and periods unnecessarily. |
|
600 | 875 | |
|
601 | 876 | Whitespace is transformed into underscores, and then |
|
602 | 877 | anything which is not a hyphen or a character that |
|
603 | 878 | matches \w (alphanumerics and underscore) is removed. |
|
604 | 879 | |
|
605 | 880 | """ |
|
606 | 881 | # Transform all whitespace to underscore |
|
607 | 882 | idstring = re.sub(r'\s', "_", '%s' % idstring) |
|
608 | 883 | # Remove everything that is not a hyphen or a member of \w |
|
609 | 884 | idstring = re.sub(r'(?!-)\W', "", idstring).lower() |
|
610 | 885 | return idstring |
|
611 | 886 | |
|
612 | 887 | def prepare(self, inline_diff=True): |
|
613 | 888 | """ |
|
614 | 889 | Prepare the passed udiff for HTML rendering. |
|
615 | 890 | |
|
616 | 891 | :return: A list of dicts with diff information. |
|
617 | 892 | """ |
|
618 | 893 | parsed = self._parser(inline_diff=inline_diff) |
|
619 | 894 | self.parsed = True |
|
620 | 895 | self.parsed_diff = parsed |
|
621 | 896 | return parsed |
|
622 | 897 | |
|
623 | 898 | def as_raw(self, diff_lines=None): |
|
624 | 899 | """ |
|
625 | 900 | Returns raw diff as a byte string |
|
626 | 901 | """ |
|
627 | 902 | return self._diff.raw |
|
628 | 903 | |
|
629 | 904 | def as_html(self, table_class='code-difftable', line_class='line', |
|
630 | 905 | old_lineno_class='lineno old', new_lineno_class='lineno new', |
|
631 | 906 | code_class='code', enable_comments=False, parsed_lines=None): |
|
632 | 907 | """ |
|
633 | 908 | Return given diff as html table with customized css classes |
|
634 | 909 | """ |
|
635 | 910 | def _link_to_if(condition, label, url): |
|
636 | 911 | """ |
|
637 | 912 | Generates a link if condition is meet or just the label if not. |
|
638 | 913 | """ |
|
639 | 914 | |
|
640 | 915 | if condition: |
|
641 | 916 | return '''<a href="%(url)s" class="tooltip" |
|
642 | 917 | title="%(title)s">%(label)s</a>''' % { |
|
643 | 918 | 'title': _('Click to select line'), |
|
644 | 919 | 'url': url, |
|
645 | 920 | 'label': label |
|
646 | 921 | } |
|
647 | 922 | else: |
|
648 | 923 | return label |
|
649 | 924 | if not self.parsed: |
|
650 | 925 | self.prepare() |
|
651 | 926 | |
|
652 | 927 | diff_lines = self.parsed_diff |
|
653 | 928 | if parsed_lines: |
|
654 | 929 | diff_lines = parsed_lines |
|
655 | 930 | |
|
656 | 931 | _html_empty = True |
|
657 | 932 | _html = [] |
|
658 | 933 | _html.append('''<table class="%(table_class)s">\n''' % { |
|
659 | 934 | 'table_class': table_class |
|
660 | 935 | }) |
|
661 | 936 | |
|
662 | 937 | for diff in diff_lines: |
|
663 | 938 | for line in diff['chunks']: |
|
664 | 939 | _html_empty = False |
|
665 | 940 | for change in line: |
|
666 | 941 | _html.append('''<tr class="%(lc)s %(action)s">\n''' % { |
|
667 | 942 | 'lc': line_class, |
|
668 | 943 | 'action': change['action'] |
|
669 | 944 | }) |
|
670 | 945 | anchor_old_id = '' |
|
671 | 946 | anchor_new_id = '' |
|
672 | 947 | anchor_old = "%(filename)s_o%(oldline_no)s" % { |
|
673 | 948 | 'filename': self._safe_id(diff['filename']), |
|
674 | 949 | 'oldline_no': change['old_lineno'] |
|
675 | 950 | } |
|
676 | 951 | anchor_new = "%(filename)s_n%(oldline_no)s" % { |
|
677 | 952 | 'filename': self._safe_id(diff['filename']), |
|
678 | 953 | 'oldline_no': change['new_lineno'] |
|
679 | 954 | } |
|
680 | 955 | cond_old = (change['old_lineno'] != '...' and |
|
681 | 956 | change['old_lineno']) |
|
682 | 957 | cond_new = (change['new_lineno'] != '...' and |
|
683 | 958 | change['new_lineno']) |
|
684 | 959 | if cond_old: |
|
685 | 960 | anchor_old_id = 'id="%s"' % anchor_old |
|
686 | 961 | if cond_new: |
|
687 | 962 | anchor_new_id = 'id="%s"' % anchor_new |
|
688 | 963 | |
|
689 | 964 | if change['action'] != Action.CONTEXT: |
|
690 | 965 | anchor_link = True |
|
691 | 966 | else: |
|
692 | 967 | anchor_link = False |
|
693 | 968 | |
|
694 | 969 | ########################################################### |
|
695 | 970 | # COMMENT ICONS |
|
696 | 971 | ########################################################### |
|
697 | 972 | _html.append('''\t<td class="add-comment-line"><span class="add-comment-content">''') |
|
698 | 973 | |
|
699 | 974 | if enable_comments and change['action'] != Action.CONTEXT: |
|
700 | 975 | _html.append('''<a href="#"><span class="icon-comment-add"></span></a>''') |
|
701 | 976 | |
|
702 | 977 | _html.append('''</span></td><td class="comment-toggle tooltip" title="Toggle Comment Thread"><i class="icon-comment"></i></td>\n''') |
|
703 | 978 | |
|
704 | 979 | ########################################################### |
|
705 | 980 | # OLD LINE NUMBER |
|
706 | 981 | ########################################################### |
|
707 | 982 | _html.append('''\t<td %(a_id)s class="%(olc)s">''' % { |
|
708 | 983 | 'a_id': anchor_old_id, |
|
709 | 984 | 'olc': old_lineno_class |
|
710 | 985 | }) |
|
711 | 986 | |
|
712 | 987 | _html.append('''%(link)s''' % { |
|
713 | 988 | 'link': _link_to_if(anchor_link, change['old_lineno'], |
|
714 | 989 | '#%s' % anchor_old) |
|
715 | 990 | }) |
|
716 | 991 | _html.append('''</td>\n''') |
|
717 | 992 | ########################################################### |
|
718 | 993 | # NEW LINE NUMBER |
|
719 | 994 | ########################################################### |
|
720 | 995 | |
|
721 | 996 | _html.append('''\t<td %(a_id)s class="%(nlc)s">''' % { |
|
722 | 997 | 'a_id': anchor_new_id, |
|
723 | 998 | 'nlc': new_lineno_class |
|
724 | 999 | }) |
|
725 | 1000 | |
|
726 | 1001 | _html.append('''%(link)s''' % { |
|
727 | 1002 | 'link': _link_to_if(anchor_link, change['new_lineno'], |
|
728 | 1003 | '#%s' % anchor_new) |
|
729 | 1004 | }) |
|
730 | 1005 | _html.append('''</td>\n''') |
|
731 | 1006 | ########################################################### |
|
732 | 1007 | # CODE |
|
733 | 1008 | ########################################################### |
|
734 | 1009 | code_classes = [code_class] |
|
735 | 1010 | if (not enable_comments or |
|
736 | 1011 | change['action'] == Action.CONTEXT): |
|
737 | 1012 | code_classes.append('no-comment') |
|
738 | 1013 | _html.append('\t<td class="%s">' % ' '.join(code_classes)) |
|
739 | 1014 | _html.append('''\n\t\t<pre>%(code)s</pre>\n''' % { |
|
740 | 1015 | 'code': change['line'] |
|
741 | 1016 | }) |
|
742 | 1017 | |
|
743 | 1018 | _html.append('''\t</td>''') |
|
744 | 1019 | _html.append('''\n</tr>\n''') |
|
745 | 1020 | _html.append('''</table>''') |
|
746 | 1021 | if _html_empty: |
|
747 | 1022 | return None |
|
748 | 1023 | return ''.join(_html) |
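The typical call path, sketched (vcs_diff is a backend Diff object as described in __init__):

    dp = DiffProcessor(vcs_diff, format='gitdiff', diff_limit=64 * 1024,
                       file_limit=16 * 1024, show_full_diff=False)
    html = dp.as_html(enable_comments=True)  # runs prepare() if needed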
|
749 | 1024 | |
|
750 | 1025 | def stat(self): |
|
751 | 1026 | """ |
|
752 | 1027 | Returns tuple of added, and removed lines for this instance |
|
753 | 1028 | """ |
|
754 | 1029 | return self.adds, self.removes |
|
755 | 1030 | |
|
756 | 1031 | def get_context_of_line( |
|
757 | 1032 | self, path, diff_line=None, context_before=3, context_after=3): |
|
758 | 1033 | """ |
|
759 | 1034 | Returns the context lines for the specified diff line. |
|
760 | 1035 | |
|
761 | 1036 | :type diff_line: :class:`DiffLineNumber` |
|
762 | 1037 | """ |
|
763 | 1038 | assert self.parsed, "DiffProcessor is not initialized." |
|
764 | 1039 | |
|
765 | 1040 | if None not in diff_line: |
|
766 | 1041 | raise ValueError( |
|
767 | 1042 | "Cannot specify both line numbers: {}".format(diff_line)) |
|
768 | 1043 | |
|
769 | 1044 | file_diff = self._get_file_diff(path) |
|
770 | 1045 | chunk, idx = self._find_chunk_line_index(file_diff, diff_line) |
|
771 | 1046 | |
|
772 | 1047 | first_line_to_include = max(idx - context_before, 0) |
|
773 | 1048 | first_line_after_context = idx + context_after + 1 |
|
774 | 1049 | context_lines = chunk[first_line_to_include:first_line_after_context] |
|
775 | 1050 | |
|
776 | 1051 | line_contents = [ |
|
777 | 1052 | _context_line(line) for line in context_lines |
|
778 | 1053 | if _is_diff_content(line)] |
|
779 | 1054 | # TODO: johbo: Interim fixup, the diff chunks drop the final newline. |
|
780 | 1055 | # Once they are fixed, we can drop this line here. |
|
781 | 1056 | if line_contents: |
|
782 | 1057 | line_contents[-1] = ( |
|
783 | 1058 | line_contents[-1][0], line_contents[-1][1].rstrip('\n') + '\n') |
|
784 | 1059 | return line_contents |
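Exactly one side of the DiffLineNumber must be None, as the guard above enforces. A sketch, assuming the processor has been prepared and 'setup.py' is part of the diff:

    context = dp.get_context_of_line(
        'setup.py', diff_line=DiffLineNumber(old=None, new=42))
    # up to 3 (action, line) pairs before and after the matched diff line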
|
785 | 1060 | |
|
786 | 1061 | def find_context(self, path, context, offset=0): |
|
787 | 1062 | """ |
|
788 | 1063 | Finds the given `context` inside of the diff. |
|
789 | 1064 | |
|
790 | 1065 | Use the parameter `offset` to specify which offset the target line has |
|
791 | 1066 | inside of the given `context`. This way the correct diff line will be |
|
792 | 1067 | returned. |
|
793 | 1068 | |
|
794 | 1069 | :param offset: Shall be used to specify the offset of the main line |
|
795 | 1070 | within the given `context`. |
|
796 | 1071 | """ |
|
797 | 1072 | if offset < 0 or offset >= len(context): |
|
798 | 1073 | raise ValueError( |
|
799 | 1074 | "Only positive values up to the length of the context " |
|
800 | 1075 | "minus one are allowed.") |
|
801 | 1076 | |
|
802 | 1077 | matches = [] |
|
803 | 1078 | file_diff = self._get_file_diff(path) |
|
804 | 1079 | |
|
805 | 1080 | for chunk in file_diff['chunks']: |
|
806 | 1081 | context_iter = iter(context) |
|
807 | 1082 | for line_idx, line in enumerate(chunk): |
|
808 | 1083 | try: |
|
809 | 1084 | if _context_line(line) == context_iter.next(): |
|
810 | 1085 | continue |
|
811 | 1086 | except StopIteration: |
|
812 | 1087 | matches.append((line_idx, chunk)) |
|
813 | 1088 | context_iter = iter(context) |
|
814 | 1089 | |
|
815 | 1090 | # Increment position and triger StopIteration |
|
816 | 1091 | # if we had a match at the end |
|
817 | 1092 | line_idx += 1 |
|
818 | 1093 | try: |
|
819 | 1094 | context_iter.next() |
|
820 | 1095 | except StopIteration: |
|
821 | 1096 | matches.append((line_idx, chunk)) |
|
822 | 1097 | |
|
823 | 1098 | effective_offset = len(context) - offset |
|
824 | 1099 | found_at_diff_lines = [ |
|
825 | 1100 | _line_to_diff_line_number(chunk[idx - effective_offset]) |
|
826 | 1101 | for idx, chunk in matches] |
|
827 | 1102 | |
|
828 | 1103 | return found_at_diff_lines |
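A sketch of the intended call: the context is a list of (action, line) pairs such as get_context_of_line returns, and offset selects which line inside the context to report (the filename and line contents here are assumptions):

    context = [
        (Action.UNMODIFIED, 'unchanged\n'),
        (Action.DELETE, 'old\n'),
        (Action.ADD, 'new\n'),
    ]
    hits = dp.find_context('setup.py', context, offset=1)
    # hits: a list of DiffLineNumber(old=..., new=...) positions, one per
    # place the three-line context matches in the parsed diff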
|
829 | 1104 | |
|
830 | 1105 | def _get_file_diff(self, path): |
|
831 | 1106 | for file_diff in self.parsed_diff: |
|
832 | 1107 | if file_diff['filename'] == path: |
|
833 | 1108 | break |
|
834 | 1109 | else: |
|
835 | 1110 | raise FileNotInDiffException("File {} not in diff".format(path)) |
|
836 | 1111 | return file_diff |
|
837 | 1112 | |
|
838 | 1113 | def _find_chunk_line_index(self, file_diff, diff_line): |
|
839 | 1114 | for chunk in file_diff['chunks']: |
|
840 | 1115 | for idx, line in enumerate(chunk): |
|
841 | 1116 | if line['old_lineno'] == diff_line.old: |
|
842 | 1117 | return chunk, idx |
|
843 | 1118 | if line['new_lineno'] == diff_line.new: |
|
844 | 1119 | return chunk, idx |
|
845 | 1120 | raise LineNotInDiffException( |
|
846 | 1121 | "The line {} is not part of the diff.".format(diff_line)) |
|
847 | 1122 | |
|
848 | 1123 | |
|
849 | 1124 | def _is_diff_content(line): |
|
850 | 1125 | return line['action'] in ( |
|
851 | 1126 | Action.UNMODIFIED, Action.ADD, Action.DELETE) |
|
852 | 1127 | |
|
853 | 1128 | |
|
854 | 1129 | def _context_line(line): |
|
855 | 1130 | return (line['action'], line['line']) |
|
856 | 1131 | |
|
857 | 1132 | |
|
858 | 1133 | DiffLineNumber = collections.namedtuple('DiffLineNumber', ['old', 'new']) |
|
859 | 1134 | |
|
860 | 1135 | |
|
861 | 1136 | def _line_to_diff_line_number(line): |
|
862 | 1137 | new_line_no = line['new_lineno'] or None |
|
863 | 1138 | old_line_no = line['old_lineno'] or None |
|
864 | 1139 | return DiffLineNumber(old=old_line_no, new=new_line_no) |
|
865 | 1140 | |
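The `or None` above collapses whatever falsy placeholder the parser leaves for a missing side (0, '' or None) into a plain None, so callers can test with `is None`; a small sketch with an illustrative line dict:

    line = {'old_lineno': 0, 'new_lineno': 57}
    _line_to_diff_line_number(line)  # DiffLineNumber(old=None, new=57)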
|
866 | 1141 | |
|
867 | 1142 | class FileNotInDiffException(Exception): |
|
868 | 1143 | """ |
|
869 | 1144 | Raised when the context for a missing file is requested. |
|
870 | 1145 | |
|
871 | 1146 | If you request the context for a line in a file which is not part of the |
|
872 | 1147 | given diff, then this exception is raised. |
|
873 | 1148 | """ |
|
874 | 1149 | |
|
875 | 1150 | |
|
876 | 1151 | class LineNotInDiffException(Exception): |
|
877 | 1152 | """ |
|
878 | 1153 | Raised when the context for a missing line is requested. |
|
879 | 1154 | |
|
880 | 1155 | If you request the context for a line in a file and this line is not |
|
881 | 1156 | part of the given diff, then this exception is raised. |
|
882 | 1157 | """ |
|
883 | 1158 | |
|
884 | 1159 | |
|
885 | 1160 | class DiffLimitExceeded(Exception): |
|
886 | 1161 | pass |
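The lookup helpers raise the exceptions above instead of returning sentinels, so a hypothetical caller guards like this:

    try:
        processor.get_context_of_line(
            'missing.txt', DiffLineNumber(old=3, new=None))
    except FileNotInDiffException:
        pass  # the file is not part of this diff
    except LineNotInDiffException:
        pass  # the file is there, but the line is not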
@@ -1,47 +1,49 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2014-2016 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | """ |
|
22 | 22 | GIT diff module |
|
23 | 23 | """ |
|
24 | 24 | |
|
25 | 25 | import re |
|
26 | 26 | |
|
27 | 27 | from rhodecode.lib.vcs.backends import base |
|
28 | 28 | |
|
29 | 29 | |
|
30 | 30 | class GitDiff(base.Diff): |
|
31 | 31 | |
|
32 | 32 | _header_re = re.compile(r""" |
|
33 | 33 | #^diff[ ]--git |
|
34 | 34 | [ ]"?a/(?P<a_path>.+?)"?[ ]"?b/(?P<b_path>.+?)"?\n |
|
35 | (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%\n | |
|
36 | ^rename[ ]from[ ](?P<rename_from>[^\r\n]+)\n | |
|
37 | ^rename[ ]to[ ](?P<rename_to>[^\r\n]+)(?:\n|$))? | |
|
38 | 35 | (?:^old[ ]mode[ ](?P<old_mode>\d+)\n |
|
39 | 36 | ^new[ ]mode[ ](?P<new_mode>\d+)(?:\n|$))? |
|
37 | (?:^similarity[ ]index[ ](?P<similarity_index>\d+)%(?:\n|$))? | |
|
38 | (?:^rename[ ]from[ ](?P<rename_from>[^\r\n]+)\n | |
|
39 | ^rename[ ]to[ ](?P<rename_to>[^\r\n]+)(?:\n|$))? | |
|
40 | (?:^copy[ ]from[ ](?P<copy_from>[^\r\n]+)\n | |
|
41 | ^copy[ ]to[ ](?P<copy_to>[^\r\n]+)(?:\n|$))? | |
|
40 | 42 | (?:^new[ ]file[ ]mode[ ](?P<new_file_mode>.+)(?:\n|$))? |
|
41 | 43 | (?:^deleted[ ]file[ ]mode[ ](?P<deleted_file_mode>.+)(?:\n|$))? |
|
42 | 44 | (?:^index[ ](?P<a_blob_id>[0-9A-Fa-f]+) |
|
43 | 45 | \.\.(?P<b_blob_id>[0-9A-Fa-f]+)[ ]?(?P<b_mode>.+)?(?:\n|$))? |
|
44 | 46 | (?:^(?P<bin_patch>GIT[ ]binary[ ]patch)(?:\n|$))? |
|
45 | 47 | (?:^---[ ](a/(?P<a_file>.+)|/dev/null)(?:\n|$))? |
|
46 | 48 | (?:^\+\+\+[ ](b/(?P<b_file>.+)|/dev/null)(?:\n|$))? |
|
47 | 49 | """, re.VERBOSE | re.MULTILINE) |
@@ -1,381 +1,389 b'' | |||
|
1 | 1 | |
|
2 | 2 | |
|
3 | 3 | //BUTTONS |
|
4 | 4 | button, |
|
5 | 5 | .btn, |
|
6 | 6 | input[type="button"] { |
|
7 | 7 | -webkit-appearance: none; |
|
8 | 8 | display: inline-block; |
|
9 | 9 | margin: 0 @padding/3 0 0; |
|
10 | 10 | padding: @button-padding; |
|
11 | 11 | text-align: center; |
|
12 | 12 | font-size: @basefontsize; |
|
13 | 13 | line-height: 1em; |
|
14 | 14 | font-family: @text-light; |
|
15 | 15 | text-decoration: none; |
|
16 | 16 | text-shadow: none; |
|
17 | 17 | color: @grey4; |
|
18 | 18 | background-color: white; |
|
19 | 19 | background-image: none; |
|
20 | 20 | border: none; |
|
21 | 21 | .border ( @border-thickness-buttons, @grey4 ); |
|
22 | 22 | .border-radius (@border-radius); |
|
23 | 23 | cursor: pointer; |
|
24 | 24 | white-space: nowrap; |
|
25 | 25 | -webkit-transition: background .3s,color .3s; |
|
26 | 26 | -moz-transition: background .3s,color .3s; |
|
27 | 27 | -o-transition: background .3s,color .3s; |
|
28 | 28 | transition: background .3s,color .3s; |
|
29 | 29 | |
|
30 | 30 | a { |
|
31 | 31 | display: block; |
|
32 | 32 | margin: 0; |
|
33 | 33 | padding: 0; |
|
34 | 34 | color: inherit; |
|
35 | 35 | text-decoration: none; |
|
36 | 36 | |
|
37 | 37 | &:hover { |
|
38 | 38 | text-decoration: none; |
|
39 | 39 | } |
|
40 | 40 | } |
|
41 | 41 | |
|
42 | 42 | &:focus, |
|
43 | 43 | &:active { |
|
44 | 44 | outline:none; |
|
45 | 45 | } |
|
46 | 46 | &:hover { |
|
47 | 47 | color: white; |
|
48 | 48 | background-color: @grey4; |
|
49 | 49 | } |
|
50 | 50 | |
|
51 | 51 | .icon-remove-sign { |
|
52 | 52 | display: none; |
|
53 | 53 | } |
|
54 | 54 | |
|
55 | 55 | //disabled buttons |
|
56 | 56 | //last; overrides any other styles |
|
57 | 57 | &:disabled { |
|
58 | 58 | opacity: .7; |
|
59 | 59 | cursor: auto; |
|
60 | 60 | background-color: white; |
|
61 | 61 | color: @grey4; |
|
62 | 62 | text-shadow: none; |
|
63 | 63 | } |
|
64 | 64 | |
|
65 | 65 | } |
|
66 | 66 | |
|
67 | 67 | |
|
68 | 68 | .btn-default { |
|
69 | 69 | .border ( @border-thickness-buttons, @rcblue ); |
|
70 | 70 | background-image: none; |
|
71 | 71 | color: @rcblue; |
|
72 | 72 | |
|
73 | 73 | a { |
|
74 | 74 | color: @rcblue; |
|
75 | 75 | } |
|
76 | 76 | |
|
77 | 77 | &:hover, |
|
78 | 78 | &.active { |
|
79 | 79 | color: white; |
|
80 | 80 | background-color: @rcdarkblue; |
|
81 | 81 | .border ( @border-thickness, @rcdarkblue ); |
|
82 | 82 | |
|
83 | 83 | a { |
|
84 | 84 | color: white; |
|
85 | 85 | } |
|
86 | 86 | } |
|
87 | 87 | &:disabled { |
|
88 | 88 | .border ( @border-thickness-buttons, @grey4 ); |
|
89 | 89 | background-color: transparent;
|
90 | 90 | } |
|
91 | 91 | } |
|
92 | 92 | |
|
93 | 93 | .btn-primary, |
|
94 | 94 | .btn-small, /* TODO: anderson: remove .btn-small to not mix with the new btn-sm */ |
|
95 | 95 | .btn-success { |
|
96 | 96 | .border ( @border-thickness, @rcblue ); |
|
97 | 97 | background-color: @rcblue; |
|
98 | 98 | color: white; |
|
99 | 99 | |
|
100 | 100 | a { |
|
101 | 101 | color: white; |
|
102 | 102 | } |
|
103 | 103 | |
|
104 | 104 | &:hover, |
|
105 | 105 | &.active { |
|
106 | 106 | .border ( @border-thickness, @rcdarkblue ); |
|
107 | 107 | color: white; |
|
108 | 108 | background-color: @rcdarkblue; |
|
109 | 109 | |
|
110 | 110 | a { |
|
111 | 111 | color: white; |
|
112 | 112 | } |
|
113 | 113 | } |
|
114 | 114 | &:disabled { |
|
115 | 115 | background-color: @rcblue; |
|
116 | 116 | } |
|
117 | 117 | } |
|
118 | 118 | |
|
119 | 119 | .btn-secondary { |
|
120 | 120 | &:extend(.btn-default); |
|
121 | 121 | |
|
122 | 122 | background-color: white; |
|
123 | 123 | |
|
124 | 124 | &:focus { |
|
125 | 125 | outline: 0; |
|
126 | 126 | } |
|
127 | 127 | |
|
128 | 128 | &:hover { |
|
129 | 129 | &:extend(.btn-default:hover); |
|
130 | 130 | } |
|
131 | 131 | |
|
132 | 132 | &.btn-link { |
|
133 | 133 | &:extend(.btn-link); |
|
134 | 134 | color: @rcblue; |
|
135 | 135 | } |
|
136 | 136 | |
|
137 | 137 | &:disabled { |
|
138 | 138 | color: @rcblue; |
|
139 | 139 | background-color: white; |
|
140 | 140 | } |
|
141 | 141 | } |
|
142 | 142 | |
|
143 | 143 | .btn-warning, |
|
144 | 144 | .btn-danger, |
|
145 | 145 | .revoke_perm, |
|
146 | 146 | .btn-x, |
|
147 | 147 | .form .action_button.btn-x { |
|
148 | 148 | .border ( @border-thickness, @alert2 ); |
|
149 | 149 | background-color: white; |
|
150 | 150 | color: @alert2; |
|
151 | 151 | |
|
152 | 152 | a { |
|
153 | 153 | color: @alert2; |
|
154 | 154 | } |
|
155 | 155 | |
|
156 | 156 | &:hover, |
|
157 | 157 | &.active { |
|
158 | 158 | .border ( @border-thickness, @alert2 ); |
|
159 | 159 | color: white; |
|
160 | 160 | background-color: @alert2; |
|
161 | 161 | |
|
162 | 162 | a { |
|
163 | 163 | color: white; |
|
164 | 164 | } |
|
165 | 165 | } |
|
166 | 166 | |
|
167 | 167 | i { |
|
168 | 168 | display:none; |
|
169 | 169 | } |
|
170 | 170 | |
|
171 | 171 | &:disabled { |
|
172 | 172 | background-color: white; |
|
173 | 173 | color: @alert2; |
|
174 | 174 | } |
|
175 | 175 | } |
|
176 | 176 | |
|
177 | 177 | .btn-sm, |
|
178 | 178 | .btn-mini, |
|
179 | 179 | .field-sm .btn { |
|
180 | 180 | padding: @padding/3; |
|
181 | 181 | } |
|
182 | 182 | |
|
183 | 183 | .btn-xs { |
|
184 | 184 | padding: @padding/4; |
|
185 | 185 | } |
|
186 | 186 | |
|
187 | 187 | .btn-lg { |
|
188 | 188 | padding: @padding * 1.2; |
|
189 | 189 | } |
|
190 | 190 | |
|
191 | .btn-group { | |
|
192 | display: inline-block; | |
|
193 | .btn { | |
|
194 | float: left; | |
|
195 | margin: 0 0 0 -1px; | |
|
196 | } | |
|
197 | } | |
|
198 | ||
|
191 | 199 | .btn-link { |
|
192 | 200 | background: transparent; |
|
193 | 201 | border: none; |
|
194 | 202 | padding: 0; |
|
195 | 203 | color: @rcblue; |
|
196 | 204 | |
|
197 | 205 | &:hover { |
|
198 | 206 | background: transparent; |
|
199 | 207 | border: none; |
|
200 | 208 | color: @rcdarkblue; |
|
201 | 209 | } |
|
202 | 210 | |
|
203 | 211 | &:disabled { |
|
204 | 212 | color: @grey4; |
|
205 | 213 | } |
|
206 | 214 | |
|
207 | 215 | // TODO: johbo: Check if we can avoid this, indicates that the structure |
|
208 | 216 | // is not yet good. |
|
209 | 217 | // lisa: The button CSS reflects the button HTML; both need a cleanup. |
|
210 | 218 | &.btn-danger { |
|
211 | 219 | color: @alert2; |
|
212 | 220 | |
|
213 | 221 | &:hover { |
|
214 | 222 | color: darken(@alert2,30%); |
|
215 | 223 | } |
|
216 | 224 | |
|
217 | 225 | &:disabled { |
|
218 | 226 | color: @alert2; |
|
219 | 227 | } |
|
220 | 228 | } |
|
221 | 229 | } |
|
222 | 230 | |
|
223 | 231 | .btn-social { |
|
224 | 232 | &:extend(.btn-default); |
|
225 | 233 | margin: 5px 5px 5px 0px; |
|
226 | 234 | min-width: 150px; |
|
227 | 235 | } |
|
228 | 236 | |
|
229 | 237 | // TODO: johbo: check these exceptions |
|
230 | 238 | |
|
231 | 239 | .links { |
|
232 | 240 | |
|
233 | 241 | .btn + .btn { |
|
234 | 242 | margin-top: @padding; |
|
235 | 243 | } |
|
236 | 244 | } |
|
237 | 245 | |
|
238 | 246 | |
|
239 | 247 | .action_button { |
|
240 | 248 | display:inline; |
|
241 | 249 | margin: 0; |
|
242 | 250 | padding: 0 1em 0 0; |
|
243 | 251 | font-size: inherit; |
|
244 | 252 | color: @rcblue; |
|
245 | 253 | border: none; |
|
246 | 254 | .border-radius (0); |
|
247 | 255 | background-color: transparent; |
|
248 | 256 | |
|
249 | 257 | &:last-child { |
|
250 | 258 | border: none; |
|
251 | 259 | } |
|
252 | 260 | |
|
253 | 261 | &:hover { |
|
254 | 262 | color: @rcdarkblue; |
|
255 | 263 | background-color: transparent; |
|
256 | 264 | border: none; |
|
257 | 265 | } |
|
258 | 266 | } |
|
259 | 267 | .grid_delete { |
|
260 | 268 | .action_button { |
|
261 | 269 | border: none; |
|
262 | 270 | } |
|
263 | 271 | } |
|
264 | 272 | |
|
265 | 273 | |
|
266 | 274 | // TODO: johbo: Form button tweaks, check if we can use the classes instead |
|
267 | 275 | input[type="submit"] { |
|
268 | 276 | &:extend(.btn-primary); |
|
269 | 277 | |
|
270 | 278 | &:focus { |
|
271 | 279 | outline: 0; |
|
272 | 280 | } |
|
273 | 281 | |
|
274 | 282 | &:hover { |
|
275 | 283 | &:extend(.btn-primary:hover); |
|
276 | 284 | } |
|
277 | 285 | |
|
278 | 286 | &.btn-link { |
|
279 | 287 | &:extend(.btn-link); |
|
280 | 288 | color: @rcblue; |
|
281 | 289 | |
|
282 | 290 | &:disabled { |
|
283 | 291 | color: @rcblue; |
|
284 | 292 | background-color: transparent; |
|
285 | 293 | } |
|
286 | 294 | } |
|
287 | 295 | |
|
288 | 296 | &:disabled { |
|
289 | 297 | .border ( @border-thickness-buttons, @rcblue ); |
|
290 | 298 | background-color: @rcblue; |
|
291 | 299 | color: white; |
|
292 | 300 | } |
|
293 | 301 | } |
|
294 | 302 | |
|
295 | 303 | input[type="reset"] { |
|
296 | 304 | &:extend(.btn-default); |
|
297 | 305 | |
|
298 | 306 | // TODO: johbo: Check if this tweak can be avoided. |
|
299 | 307 | background: transparent; |
|
300 | 308 | |
|
301 | 309 | &:focus { |
|
302 | 310 | outline: 0; |
|
303 | 311 | } |
|
304 | 312 | |
|
305 | 313 | &:hover { |
|
306 | 314 | &:extend(.btn-default:hover); |
|
307 | 315 | } |
|
308 | 316 | |
|
309 | 317 | &.btn-link { |
|
310 | 318 | &:extend(.btn-link); |
|
311 | 319 | color: @rcblue; |
|
312 | 320 | |
|
313 | 321 | &:disabled { |
|
314 | 322 | border: none; |
|
315 | 323 | } |
|
316 | 324 | } |
|
317 | 325 | |
|
318 | 326 | &:disabled { |
|
319 | 327 | .border ( @border-thickness-buttons, @rcblue ); |
|
320 | 328 | background-color: white; |
|
321 | 329 | color: @rcblue; |
|
322 | 330 | } |
|
323 | 331 | } |
|
324 | 332 | |
|
325 | 333 | input[type="submit"], |
|
326 | 334 | input[type="reset"] { |
|
327 | 335 | &.btn-danger { |
|
328 | 336 | &:extend(.btn-danger); |
|
329 | ||
|
337 | ||
|
330 | 338 | &:focus { |
|
331 | 339 | outline: 0; |
|
332 | 340 | } |
|
333 | ||
|
341 | ||
|
334 | 342 | &:hover { |
|
335 | 343 | &:extend(.btn-danger:hover); |
|
336 | 344 | } |
|
337 | ||
|
345 | ||
|
338 | 346 | &.btn-link { |
|
339 | 347 | &:extend(.btn-link); |
|
340 | 348 | color: @alert2; |
|
341 | 349 | |
|
342 | 350 | &:hover { |
|
343 | 351 | color: darken(@alert2,30%); |
|
344 | 352 | } |
|
345 | 353 | } |
|
346 | 354 | |
|
347 | 355 | &:disabled { |
|
348 | 356 | color: @alert2; |
|
349 | 357 | background-color: white; |
|
350 | 358 | } |
|
351 | 359 | } |
|
352 | 360 | &.btn-danger-action { |
|
353 | 361 | .border ( @border-thickness, @alert2 ); |
|
354 | 362 | background-color: @alert2; |
|
355 | 363 | color: white; |
|
356 | 364 | |
|
357 | 365 | a { |
|
358 | 366 | color: white; |
|
359 | 367 | } |
|
360 | 368 | |
|
361 | 369 | &:hover { |
|
362 | 370 | background-color: darken(@alert2,20%); |
|
363 | 371 | } |
|
364 | 372 | |
|
365 | 373 | &.active { |
|
366 | 374 | .border ( @border-thickness, @alert2 ); |
|
367 | 375 | color: white; |
|
368 | 376 | background-color: @alert2; |
|
369 | 377 | |
|
370 | 378 | a { |
|
371 | 379 | color: white; |
|
372 | 380 | } |
|
373 | 381 | } |
|
374 | 382 | |
|
375 | 383 | &:disabled { |
|
376 | 384 | background-color: white; |
|
377 | 385 | color: @alert2; |
|
378 | 386 | } |
|
379 | 387 | } |
|
380 | 388 | } |
|
381 | 389 |
@@ -1,753 +1,1005 b'' | |||
|
1 | 1 | // Default styles |
|
2 | 2 | |
|
3 | 3 | .diff-collapse { |
|
4 | 4 | margin: @padding 0; |
|
5 | 5 | text-align: right; |
|
6 | 6 | } |
|
7 | 7 | |
|
8 | 8 | .diff-container { |
|
9 | 9 | margin-bottom: @space; |
|
10 | 10 | |
|
11 | 11 | .diffblock { |
|
12 | 12 | margin-bottom: @space; |
|
13 | 13 | } |
|
14 | 14 | |
|
15 | 15 | &.hidden { |
|
16 | 16 | display: none; |
|
17 | 17 | overflow: hidden; |
|
18 | 18 | } |
|
19 | 19 | } |
|
20 | 20 | |
|
21 | 21 | .compare_view_files { |
|
22 | 22 | |
|
23 | 23 | .diff-container { |
|
24 | 24 | |
|
25 | 25 | .diffblock { |
|
26 | 26 | margin-bottom: 0; |
|
27 | 27 | } |
|
28 | 28 | } |
|
29 | 29 | } |
|
30 | 30 | |
|
31 | 31 | div.diffblock .sidebyside { |
|
32 | 32 | background: #ffffff; |
|
33 | 33 | } |
|
34 | 34 | |
|
35 | 35 | div.diffblock { |
|
36 | 36 | overflow-x: auto; |
|
37 | 37 | overflow-y: hidden; |
|
38 | 38 | clear: both; |
|
39 | 39 | padding: 0px; |
|
40 | 40 | background: @grey6; |
|
41 | 41 | border: @border-thickness solid @grey5; |
|
42 | 42 | -webkit-border-radius: @border-radius @border-radius 0px 0px; |
|
43 | 43 | border-radius: @border-radius @border-radius 0px 0px; |
|
44 | 44 | |
|
45 | 45 | |
|
46 | 46 | .comments-number { |
|
47 | 47 | float: right; |
|
48 | 48 | } |
|
49 | 49 | |
|
50 | 50 | // BEGIN CODE-HEADER STYLES |
|
51 | 51 | |
|
52 | 52 | .code-header { |
|
53 | 53 | background: @grey6; |
|
54 | 54 | padding: 10px 0 10px 0; |
|
55 | 55 | height: auto; |
|
56 | 56 | width: 100%; |
|
57 | 57 | |
|
58 | 58 | .hash { |
|
59 | 59 | float: left; |
|
60 | 60 | padding: 2px 0 0 2px; |
|
61 | 61 | } |
|
62 | 62 | |
|
63 | 63 | .date { |
|
64 | 64 | float: left; |
|
65 | 65 | text-transform: uppercase; |
|
66 | 66 | padding: 4px 0px 0px 2px; |
|
67 | 67 | } |
|
68 | 68 | |
|
69 | 69 | div { |
|
70 | 70 | margin-left: 4px; |
|
71 | 71 | } |
|
72 | 72 | |
|
73 | 73 | div.compare_header { |
|
74 | 74 | min-height: 40px; |
|
75 | 75 | margin: 0; |
|
76 | 76 | padding: 0 @padding; |
|
77 | 77 | |
|
78 | 78 | .drop-menu { |
|
79 | 79 | float:left; |
|
80 | 80 | display: block; |
|
81 | 81 | margin:0 0 @padding 0; |
|
82 | 82 | } |
|
83 | 83 | |
|
84 | 84 | .compare-label { |
|
85 | 85 | float: left; |
|
86 | 86 | clear: both; |
|
87 | 87 | display: inline-block; |
|
88 | 88 | min-width: 5em; |
|
89 | 89 | margin: 0; |
|
90 | 90 | padding: @button-padding @button-padding @button-padding 0; |
|
91 | 91 | font-family: @text-semibold; |
|
92 | 92 | } |
|
93 | 93 | |
|
94 | 94 | .compare-buttons { |
|
95 | 95 | float: left; |
|
96 | 96 | margin: 0; |
|
97 | 97 | padding: 0 0 @padding; |
|
98 | 98 | |
|
99 | 99 | .btn { |
|
100 | 100 | margin: 0 @padding 0 0; |
|
101 | 101 | } |
|
102 | 102 | } |
|
103 | 103 | } |
|
104 | 104 | |
|
105 | 105 | } |
|
106 | 106 | |
|
107 | 107 | .parents { |
|
108 | 108 | float: left; |
|
109 | 109 | width: 100px; |
|
110 | 110 | font-weight: 400; |
|
111 | 111 | vertical-align: middle; |
|
112 | 112 | padding: 0px 2px 0px 2px; |
|
113 | 113 | background-color: @grey6; |
|
114 | 114 | |
|
115 | 115 | #parent_link { |
|
116 | 116 | margin: 0px 2px;
|
117 | 117 | |
|
118 | 118 | &.double { |
|
119 | 119 | margin: 0px 2px; |
|
120 | 120 | } |
|
121 | 121 | |
|
122 | 122 | &.disabled{ |
|
123 | 123 | margin-right: @padding; |
|
124 | 124 | } |
|
125 | 125 | } |
|
126 | 126 | } |
|
127 | 127 | |
|
128 | 128 | .children { |
|
129 | 129 | float: right; |
|
130 | 130 | width: 100px; |
|
131 | 131 | font-weight: 400; |
|
132 | 132 | vertical-align: middle; |
|
133 | 133 | text-align: right; |
|
134 | 134 | padding: 0px 2px 0px 2px; |
|
135 | 135 | background-color: @grey6; |
|
136 | 136 | |
|
137 | 137 | #child_link { |
|
138 | 138 | margin: 0px 2px; |
|
139 | 139 | |
|
140 | 140 | &.double { |
|
141 | 141 | margin: 0px 2px; |
|
142 | 142 | } |
|
143 | 143 | |
|
144 | 144 | &.disabled{ |
|
145 | 145 | margin-right: @padding; |
|
146 | 146 | } |
|
147 | 147 | } |
|
148 | 148 | } |
|
149 | 149 | |
|
150 | 150 | .changeset_header { |
|
151 | 151 | height: 16px; |
|
152 | 152 | |
|
153 | 153 | & > div{ |
|
154 | 154 | margin-right: @padding; |
|
155 | 155 | } |
|
156 | 156 | } |
|
157 | 157 | |
|
158 | 158 | .changeset_file { |
|
159 | 159 | text-align: left; |
|
160 | 160 | float: left; |
|
161 | 161 | padding: 0; |
|
162 | 162 | |
|
163 | 163 | a{ |
|
164 | 164 | display: inline-block; |
|
165 | 165 | margin-right: 0.5em; |
|
166 | 166 | } |
|
167 | 167 | |
|
168 | 168 | #selected_mode{ |
|
169 | 169 | margin-left: 0; |
|
170 | 170 | } |
|
171 | 171 | } |
|
172 | 172 | |
|
173 | 173 | .diff-menu-wrapper { |
|
174 | 174 | float: left; |
|
175 | 175 | } |
|
176 | 176 | |
|
177 | 177 | .diff-menu { |
|
178 | 178 | position: absolute; |
|
179 | 179 | background: none repeat scroll 0 0 #FFFFFF; |
|
180 | 180 | border-color: #003367 @grey3 @grey3; |
|
181 | 181 | border-right: 1px solid @grey3; |
|
182 | 182 | border-style: solid solid solid; |
|
183 | 183 | border-width: @border-thickness; |
|
184 | 184 | box-shadow: 2px 8px 4px rgba(0, 0, 0, 0.2); |
|
185 | 185 | margin-top: 5px; |
|
186 | 186 | margin-left: 1px; |
|
187 | 187 | } |
|
188 | 188 | |
|
189 | 189 | .diff-actions, .editor-actions { |
|
190 | 190 | float: left; |
|
191 | 191 | |
|
192 | 192 | input{ |
|
193 | 193 | margin: 0 0.5em 0 0; |
|
194 | 194 | } |
|
195 | 195 | } |
|
196 | 196 | |
|
197 | 197 | // END CODE-HEADER STYLES |
|
198 | 198 | |
|
199 | 199 | // BEGIN CODE-BODY STYLES |
|
200 | 200 | |
|
201 | 201 | .code-body { |
|
202 | 202 | background: white; |
|
203 | 203 | padding: 0; |
|
204 | 204 | background-color: #ffffff; |
|
205 | 205 | position: relative; |
|
206 | 206 | max-width: none; |
|
207 | 207 | box-sizing: border-box; |
|
208 | 208 | // TODO: johbo: Parent has overflow: auto, this forces the child here |
|
209 | 209 | // to have the intended size and to scroll. Should be simplified. |
|
210 | 210 | width: 100%; |
|
211 | 211 | overflow-x: auto; |
|
212 | 212 | } |
|
213 | 213 | |
|
214 | 214 | pre.raw { |
|
215 | 215 | background: white; |
|
216 | 216 | color: @grey1; |
|
217 | 217 | } |
|
218 | 218 | // END CODE-BODY STYLES |
|
219 | 219 | |
|
220 | 220 | } |
|
221 | 221 | |
|
222 | 222 | |
|
223 | 223 | table.code-difftable { |
|
224 | 224 | border-collapse: collapse; |
|
225 | 225 | width: 99%; |
|
226 | 226 | border-radius: 0px !important; |
|
227 | 227 | |
|
228 | 228 | td { |
|
229 | 229 | padding: 0 !important; |
|
230 | 230 | background: none !important; |
|
231 | 231 | border: 0 !important; |
|
232 | 232 | } |
|
233 | 233 | |
|
234 | 234 | .context { |
|
235 | 235 | background: none repeat scroll 0 0 #DDE7EF; |
|
236 | 236 | } |
|
237 | 237 | |
|
238 | 238 | .add { |
|
239 | 239 | background: none repeat scroll 0 0 #DDFFDD; |
|
240 | 240 | |
|
241 | 241 | ins { |
|
242 | 242 | background: none repeat scroll 0 0 #AAFFAA; |
|
243 | 243 | text-decoration: none; |
|
244 | 244 | } |
|
245 | 245 | } |
|
246 | 246 | |
|
247 | 247 | .del { |
|
248 | 248 | background: none repeat scroll 0 0 #FFDDDD; |
|
249 | 249 | |
|
250 | 250 | del { |
|
251 | 251 | background: none repeat scroll 0 0 #FFAAAA; |
|
252 | 252 | text-decoration: none; |
|
253 | 253 | } |
|
254 | 254 | } |
|
255 | 255 | |
|
256 | 256 | /** LINE NUMBERS **/ |
|
257 | 257 | .lineno { |
|
258 | 258 | padding-left: 2px !important; |
|
259 | 259 | padding-right: 2px; |
|
260 | 260 | text-align: right; |
|
261 | 261 | width: 32px; |
|
262 | 262 | -moz-user-select: none; |
|
263 | 263 | -webkit-user-select: none; |
|
264 | 264 | border-right: @border-thickness solid @grey5 !important; |
|
265 | 265 | border-left: 0px solid #CCC !important; |
|
266 | 266 | border-top: 0px solid #CCC !important; |
|
267 | 267 | border-bottom: none !important; |
|
268 | 268 | |
|
269 | 269 | a { |
|
270 | 270 | &:extend(pre); |
|
271 | 271 | text-align: right; |
|
272 | 272 | padding-right: 2px; |
|
273 | 273 | cursor: pointer; |
|
274 | 274 | display: block; |
|
275 | 275 | width: 32px; |
|
276 | 276 | } |
|
277 | 277 | } |
|
278 | 278 | |
|
279 | 279 | .context { |
|
280 | 280 | cursor: auto; |
|
281 | 281 | &:extend(pre); |
|
282 | 282 | } |
|
283 | 283 | |
|
284 | 284 | .lineno-inline { |
|
285 | 285 | background: none repeat scroll 0 0 #FFF !important; |
|
286 | 286 | padding-left: 2px; |
|
287 | 287 | padding-right: 2px; |
|
288 | 288 | text-align: right; |
|
289 | 289 | width: 30px; |
|
290 | 290 | -moz-user-select: none; |
|
291 | 291 | -webkit-user-select: none; |
|
292 | 292 | } |
|
293 | 293 | |
|
294 | 294 | /** CODE **/ |
|
295 | 295 | .code { |
|
296 | 296 | display: block; |
|
297 | 297 | width: 100%; |
|
298 | 298 | |
|
299 | 299 | td { |
|
300 | 300 | margin: 0; |
|
301 | 301 | padding: 0; |
|
302 | 302 | } |
|
303 | 303 | |
|
304 | 304 | pre { |
|
305 | 305 | margin: 0; |
|
306 | 306 | padding: 0; |
|
307 | 307 | margin-left: .5em; |
|
308 | 308 | } |
|
309 | 309 | } |
|
310 | 310 | } |
|
311 | 311 | |
|
312 | 312 | |
|
313 | 313 | // Comments |
|
314 | 314 | |
|
315 | 315 | div.comment:target { |
|
316 | 316 | border-left: 6px solid @comment-highlight-color; |
|
317 | 317 | padding-left: 3px; |
|
318 | 318 | margin-left: -9px; |
|
319 | 319 | } |
|
320 | 320 | |
|
321 | 321 | //TODO: anderson: couldn't get an absolute number out of anything, so had to put the

322 | 322 | //current values, which might change. But to make it clear, I put it as a calculation
|
323 | 323 | @comment-max-width: 1065px; |
|
324 | 324 | @pr-extra-margin: 34px; |
|
325 | 325 | @pr-border-spacing: 4px; |
|
326 | 326 | @pr-comment-width: @comment-max-width - @pr-extra-margin - @pr-border-spacing; |
|
327 | 327 | |
|
328 | 328 | // Pull Request |
|
329 | 329 | .cs_files .code-difftable { |
|
330 | 330 | border: @border-thickness solid @grey5; //borders only on PRs |
|
331 | 331 |

332 | 332 | .comment-inline-form, |
|
333 | 333 | div.comment { |
|
334 | 334 | width: @pr-comment-width; |
|
335 | 335 | } |
|
336 | 336 | }

337 | 337 | |
|
338 | 338 | // Changeset |
|
339 | 339 | .code-difftable { |
|
340 | 340 | .comment-inline-form, |
|
341 | 341 | div.comment { |
|
342 | 342 | width: @comment-max-width; |
|
343 | 343 | } |
|
344 | 344 | }

345 | 345 | |
|
346 | 346 | //Style page |
|
347 | 347 | @style-extra-margin: @sidebar-width + (@sidebarpadding * 3) + @padding; |
|
348 | 348 | #style-page .code-difftable{ |
|
349 | 349 | .comment-inline-form, |
|
350 | 350 | div.comment { |
|
351 | 351 | width: @comment-max-width - @style-extra-margin; |
|
352 | 352 | } |
|
353 | 353 | }

354 | 354 | |
|
355 | 355 | #context-bar > h2 { |
|
356 | 356 | font-size: 20px; |
|
357 | 357 | }

358 | 358 | |
|
359 | 359 | #context-bar > h2> a { |
|
360 | 360 | font-size: 20px; |
|
361 | 361 | }

362 | 362 | // end of defaults |
|
363 | 363 | |
|
364 | 364 | .file_diff_buttons { |
|
365 | 365 | padding: 0 0 @padding; |
|
366 | 366 | |
|
367 | 367 | .drop-menu { |
|
368 | 368 | float: left; |
|
369 | 369 | margin: 0 @padding 0 0; |
|
370 | 370 | } |
|
371 | 371 | .btn { |
|
372 | 372 | margin: 0 @padding 0 0; |
|
373 | 373 | } |
|
374 | 374 | }

375 | 375 | |
|
376 | 376 | .code-body.textarea.editor { |
|
377 | 377 | max-width: none; |
|
378 | 378 | padding: 15px; |
|
379 | 379 | }

380 | 380 | |
|
381 | 381 | td.injected_diff{ |
|
382 | 382 | max-width: 1178px; |
|
383 | 383 | overflow-x: auto; |
|
384 | 384 | overflow-y: hidden; |
|
385 | 385 | |
|
386 | 386 | div.diff-container, |
|
387 | 387 | div.diffblock{ |
|
388 | 388 | max-width: 100%; |
|
389 | 389 | } |
|
390 | 390 | |
|
391 | 391 | div.code-body { |
|
392 | 392 | max-width: 1124px; |
|
393 | 393 | overflow-x: auto; |
|
394 | 394 | overflow-y: hidden; |
|
395 | 395 | padding: 0; |
|
396 | 396 | } |
|
397 | 397 | div.diffblock { |
|
398 | 398 | border: none; |
|
399 | 399 | } |
|
400 | 400 | |
|
401 | 401 | &.inline-form { |
|
402 | 402 | width: 99% |
|
403 | 403 | } |
|
404 | 404 | } |
|
405 | 405 | |
|
406 | 406 | |
|
407 | 407 | table.code-difftable { |
|
408 | 408 | width: 100%; |
|
409 | 409 | }

410 | 410 | |
|
411 | 411 |

412 | 412 | div.codeblock { |
|
413 | 413 | |
|
414 | 414 | // TODO: johbo: Added interim to get rid of the margin around |
|
415 | 415 | // Select2 widgets. This needs further cleanup. |
|
416 | 416 | margin-top: @padding; |
|
417 | 417 | |
|
418 | 418 | overflow: auto; |
|
419 | 419 | padding: 0px; |
|
420 | 420 | border: @border-thickness solid @grey5; |
|
421 | 421 | background: @grey6; |
|
422 | 422 | .border-radius(@border-radius); |
|
423 | 423 | |
|
424 | 424 | #remove_gist { |
|
425 | 425 | float: right; |
|
426 | 426 | } |
|
427 | 427 | |
|
428 | 428 | .author { |
|
429 | 429 | clear: both; |
|
430 | 430 | vertical-align: middle; |
|
431 | 431 | font-family: @text-bold; |
|
432 | 432 | } |
|
433 | 433 | |
|
434 | 434 | .btn-mini { |
|
435 | 435 | float: left; |
|
436 | 436 | margin: 0 5px 0 0; |
|
437 | 437 | } |
|
438 | 438 | |
|
439 | 439 | .code-header { |
|
440 | 440 | padding: @padding; |
|
441 | 441 | border-bottom: @border-thickness solid @grey5; |
|
442 | 442 | |
|
443 | 443 | .rc-user { |
|
444 | 444 | min-width: 0; |
|
445 | 445 | margin-right: .5em; |
|
446 | 446 | } |
|
447 | 447 | |
|
448 | 448 | .stats { |
|
449 | 449 | clear: both; |
|
450 | 450 | margin: 0 0 @padding 0; |
|
451 | 451 | padding: 0; |
|
452 | 452 | .left { |
|
453 | 453 | float: left; |
|
454 | 454 | clear: left; |
|
455 | 455 | max-width: 75%; |
|
456 | 456 | margin: 0 0 @padding 0; |
|
457 | 457 | |
|
458 | 458 | &.item { |
|
459 | 459 | margin-right: @padding; |
|
460 | 460 | &.last { border-right: none; } |
|
461 | 461 | } |
|
462 | 462 | } |
|
463 | 463 | .buttons { float: right; } |
|
464 | 464 | .author { |
|
465 | 465 | height: 25px; margin-left: 15px; font-weight: bold; |
|
466 | 466 | } |
|
467 | 467 | } |
|
468 | 468 | |
|
469 | 469 | .commit { |
|
470 | 470 | margin: 5px 0 0 26px; |
|
471 | 471 | font-weight: normal; |
|
472 | 472 | white-space: pre-wrap; |
|
473 | 473 | } |
|
474 | 474 | } |
|
475 | 475 | |
|
476 | 476 | .message { |
|
477 | 477 | position: relative; |
|
478 | 478 | margin: @padding; |
|
479 | 479 | |
|
480 | 480 | .codeblock-label { |
|
481 | 481 | margin: 0 0 1em 0; |
|
482 | 482 | } |
|
483 | 483 | } |
|
484 | 484 | |
|
485 | 485 | .code-body { |
|
486 | 486 | padding: @padding; |
|
487 | 487 | background-color: #ffffff; |
|
488 | 488 | min-width: 100%; |
|
489 | 489 | box-sizing: border-box; |
|
490 | 490 | // TODO: johbo: Parent has overflow: auto, this forces the child here |
|
491 | 491 | // to have the intended size and to scroll. Should be simplified. |
|
492 | 492 | width: 100%; |
|
493 | 493 | overflow-x: auto; |
|
494 | 494 | } |
|
495 | 495 | }

496 | 496 | |
|
497 | 497 | .code-highlighttable, |
|
498 | 498 | div.codeblock { |
|
499 | 499 | |
|
500 | 500 | &.readme { |
|
501 | 501 | background-color: white; |
|
502 | 502 | } |
|
503 | 503 | |
|
504 | 504 | .markdown-block table { |
|
505 | 505 | border-collapse: collapse; |
|
506 | 506 | |
|
507 | 507 | th, |
|
508 | 508 | td { |
|
509 | 509 | padding: .5em; |
|
510 | 510 | border: @border-thickness solid @border-default-color; |
|
511 | 511 | } |
|
512 | 512 | } |
|
513 | 513 | |
|
514 | 514 | table { |
|
515 | 515 | border: 0px; |
|
516 | 516 | margin: 0; |
|
517 | 517 | letter-spacing: normal; |
|
518 | 518 | |
|
519 | 519 | |
|
520 | 520 | td { |
|
521 | 521 | border: 0px; |
|
522 | 522 | vertical-align: top; |
|
523 | 523 | } |
|
524 | 524 | } |
|
525 | 525 | }

526 | 526 | |
|
527 | 527 | div.codeblock .code-header .search-path { padding: 0 0 0 10px; } |
|
528 | 528 | div.search-code-body { |
|
529 | 529 | background-color: #ffffff; padding: 5px 0 5px 10px; |
|
530 | 530 | pre { |
|
531 | 531 | .match { background-color: #faffa6;} |
|
532 | 532 | .break { display: block; width: 100%; background-color: #DDE7EF; color: #747474; } |
|
533 | 533 | } |
|
534 | 534 | .code-highlighttable { |
|
535 | 535 | border-collapse: collapse; |
|
536 | 536 | |
|
537 | 537 | tr:hover { |
|
538 | 538 | background: #fafafa; |
|
539 | 539 | } |
|
540 | 540 | td.code { |
|
541 | 541 | padding-left: 10px; |
|
542 | 542 | } |
|
543 | 543 | td.line { |
|
544 | 544 | border-right: 1px solid #ccc !important; |
|
545 | 545 | padding-right: 10px; |
|
546 | 546 | text-align: right; |
|
547 | 547 | font-family: "Lucida Console",Monaco,monospace; |
|
548 | 548 | span { |
|
549 | 549 | white-space: pre-wrap; |
|
550 | 550 | color: #666666; |
|
551 | 551 | } |
|
552 | 552 | } |
|
553 | 553 | } |
|
554 | 554 | }

555 | 555 | |
|
556 | 556 | div.annotatediv { margin-left: 2px; margin-right: 4px; } |
|
557 | 557 | .code-highlight { |
|
558 | 558 | margin: 0; padding: 0; border-left: @border-thickness solid @grey5; |
|
559 | 559 | pre, .linenodiv pre { padding: 0 5px; margin: 0; } |
|
560 | 560 | pre div:target {background-color: @comment-highlight-color !important;} |
|
561 | 561 | }

562 | 562 | |
|
563 | 563 | .linenos a { text-decoration: none; } |
|
564 | 564 | |
|
565 | 565 | .CodeMirror-selected { background: @rchighlightblue; } |
|
566 | 566 | .CodeMirror-focused .CodeMirror-selected { background: @rchighlightblue; } |
|
567 | 567 | .CodeMirror ::selection { background: @rchighlightblue; } |
|
568 | 568 | .CodeMirror ::-moz-selection { background: @rchighlightblue; } |
|
569 | 569 | |
|
570 | 570 | .code { display: block; border:0px !important; } |
|
571 | 571 | .code-highlight, /* TODO: dan: merge codehilite into code-highlight */ |
|
572 | 572 | .codehilite { |
|
573 | 573 | .hll { background-color: #ffffcc } |
|
574 | 574 | .c { color: #408080; font-style: italic } /* Comment */ |
|
575 | 575 | .err, .codehilite .err { border: @border-thickness solid #FF0000 } /* Error */ |
|
576 | 576 | .k { color: #008000; font-weight: bold } /* Keyword */ |
|
577 | 577 | .o { color: #666666 } /* Operator */ |
|
578 | 578 | .cm { color: #408080; font-style: italic } /* Comment.Multiline */ |
|
579 | 579 | .cp { color: #BC7A00 } /* Comment.Preproc */ |
|
580 | 580 | .c1 { color: #408080; font-style: italic } /* Comment.Single */ |
|
581 | 581 | .cs { color: #408080; font-style: italic } /* Comment.Special */ |
|
582 | 582 | .gd { color: #A00000 } /* Generic.Deleted */ |
|
583 | 583 | .ge { font-style: italic } /* Generic.Emph */ |
|
584 | 584 | .gr { color: #FF0000 } /* Generic.Error */ |
|
585 | 585 | .gh { color: #000080; font-weight: bold } /* Generic.Heading */ |
|
586 | 586 | .gi { color: #00A000 } /* Generic.Inserted */ |
|
587 | 587 | .go { color: #808080 } /* Generic.Output */ |
|
588 | 588 | .gp { color: #000080; font-weight: bold } /* Generic.Prompt */ |
|
589 | 589 | .gs { font-weight: bold } /* Generic.Strong */ |
|
590 | 590 | .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ |
|
591 | 591 | .gt { color: #0040D0 } /* Generic.Traceback */ |
|
592 | 592 | .kc { color: #008000; font-weight: bold } /* Keyword.Constant */ |
|
593 | 593 | .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */ |
|
594 | 594 | .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */ |
|
595 | 595 | .kp { color: #008000 } /* Keyword.Pseudo */ |
|
596 | 596 | .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */ |
|
597 | 597 | .kt { color: #B00040 } /* Keyword.Type */ |
|
598 | 598 | .m { color: #666666 } /* Literal.Number */ |
|
599 | 599 | .s { color: #BA2121 } /* Literal.String */ |
|
600 | 600 | .na { color: #7D9029 } /* Name.Attribute */ |
|
601 | 601 | .nb { color: #008000 } /* Name.Builtin */ |
|
602 | 602 | .nc { color: #0000FF; font-weight: bold } /* Name.Class */ |
|
603 | 603 | .no { color: #880000 } /* Name.Constant */ |
|
604 | 604 | .nd { color: #AA22FF } /* Name.Decorator */ |
|
605 | 605 | .ni { color: #999999; font-weight: bold } /* Name.Entity */ |
|
606 | 606 | .ne { color: #D2413A; font-weight: bold } /* Name.Exception */ |
|
607 | 607 | .nf { color: #0000FF } /* Name.Function */ |
|
608 | 608 | .nl { color: #A0A000 } /* Name.Label */ |
|
609 | 609 | .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */ |
|
610 | 610 | .nt { color: #008000; font-weight: bold } /* Name.Tag */ |
|
611 | 611 | .nv { color: #19177C } /* Name.Variable */ |
|
612 | 612 | .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */ |
|
613 | 613 | .w { color: #bbbbbb } /* Text.Whitespace */ |
|
614 | 614 | .mf { color: #666666 } /* Literal.Number.Float */ |
|
615 | 615 | .mh { color: #666666 } /* Literal.Number.Hex */ |
|
616 | 616 | .mi { color: #666666 } /* Literal.Number.Integer */ |
|
617 | 617 | .mo { color: #666666 } /* Literal.Number.Oct */ |
|
618 | 618 | .sb { color: #BA2121 } /* Literal.String.Backtick */ |
|
619 | 619 | .sc { color: #BA2121 } /* Literal.String.Char */ |
|
620 | 620 | .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */ |
|
621 | 621 | .s2 { color: #BA2121 } /* Literal.String.Double */ |
|
622 | 622 | .se { color: #BB6622; font-weight: bold } /* Literal.String.Escape */ |
|
623 | 623 | .sh { color: #BA2121 } /* Literal.String.Heredoc */ |
|
624 | 624 | .si { color: #BB6688; font-weight: bold } /* Literal.String.Interpol */ |
|
625 | 625 | .sx { color: #008000 } /* Literal.String.Other */ |
|
626 | 626 | .sr { color: #BB6688 } /* Literal.String.Regex */ |
|
627 | 627 | .s1 { color: #BA2121 } /* Literal.String.Single */ |
|
628 | 628 | .ss { color: #19177C } /* Literal.String.Symbol */ |
|
629 | 629 | .bp { color: #008000 } /* Name.Builtin.Pseudo */ |
|
630 | 630 | .vc { color: #19177C } /* Name.Variable.Class */ |
|
631 | 631 | .vg { color: #19177C } /* Name.Variable.Global */ |
|
632 | 632 | .vi { color: #19177C } /* Name.Variable.Instance */ |
|
633 | 633 | .il { color: #666666 } /* Literal.Number.Integer.Long */ |
|
634 | 634 | }

635 | 635 | |
|
636 | 636 |

637 | 637 | pre.literal-block, .codehilite pre{ |
|
638 | 638 | padding: @padding; |
|
639 | 639 | border: 1px solid @grey6; |
|
640 | 640 | .border-radius(@border-radius); |
|
641 | 641 | background-color: @grey7; |
|
642 | 642 | }

643 | 643 | |
|
644 | 644 | |
|
645 | 645 |

646 | 646 | |
|
647 | 647 | @cb-line-height: 18px; |
|
648 | 648 | @cb-line-code-padding: 10px; |
|
649 | @cb-text-padding: 5px; | |
|
649 | 650 | |
|
651 | @diff-pill-padding: 2px 7px; | |
|
652 | ||
|
653 | input.diff-collapse-state { | |
|
654 | display: none; | |
|
655 | ||
|
656 | &:checked + .diff { /* file diff is collapsed */ | |
|
657 | .cb { | |
|
658 | display: none | |
|
659 | } | |
|
660 | .diff-collapse-indicator { | |
|
661 | border-width: 9px 0 9px 15.6px; | |
|
662 | border-color: transparent transparent transparent #ccc; | |
|
663 | } | |
|
664 | .diff-menu { | |
|
665 | display: none; | |
|
666 | } | |
|
667 | margin: -1px 0 0 0; | |
|
668 | } | |
|
669 | ||
|
670 | &+ .diff { /* file diff is expanded */ | |
|
671 | .diff-collapse-indicator { | |
|
672 | border-width: 15.6px 9px 0 9px; | |
|
673 | border-color: #ccc transparent transparent transparent; | |
|
674 | } | |
|
675 | .diff-menu { | |
|
676 | display: block; | |
|
677 | } | |
|
678 | margin: 20px 0; | |
|
679 | } | |
|
680 | } | |
|
681 | .diff { | |
|
682 | border: 1px solid @grey5; | |
|
683 | ||
|
684 | /* START OVERRIDES */ | |
|
685 | .code-highlight { | |
|
686 | border: none; // TODO: remove this border from the global | |
|
687 | // .code-highlight, it doesn't belong there | |
|
688 | } | |
|
689 | label { | |
|
690 | margin: 0; // TODO: remove this margin definition from global label | |
|
691 | // it doesn't belong there - if margin on labels | |
|
692 | // are needed for a form they should be defined | |
|
693 | // in the form's class | |
|
694 | } | |
|
695 | /* END OVERRIDES */ | |
|
696 | ||
|
697 | * { | |
|
698 | box-sizing: border-box; | |
|
699 | } | |
|
700 | .diff-anchor { | |
|
701 | visibility: hidden; | |
|
702 | } | |
|
703 | &:hover { | |
|
704 | .diff-anchor { | |
|
705 | visibility: visible; | |
|
706 | } | |
|
707 | } | |
|
708 | ||
|
709 | .diff-collapse-indicator { | |
|
710 | width: 0; | |
|
711 | height: 0; | |
|
712 | border-style: solid; | |
|
713 | float: left; | |
|
714 | margin: 2px 2px 0 0; | |
|
715 | cursor: pointer; | |
|
716 | } | |
|
717 | ||
|
718 | .diff-heading { | |
|
719 | background: @grey7; | |
|
720 | cursor: pointer; | |
|
721 | display: block; | |
|
722 | padding: 5px 10px; | |
|
723 | } | |
|
724 | .diff-heading:after { | |
|
725 | content: ""; | |
|
726 | display: table; | |
|
727 | clear: both; | |
|
728 | } | |
|
729 | .diff-heading:hover { | |
|
730 | background: #e1e9f4 !important; | |
|
731 | } | |
|
732 | ||
|
733 | .diff-menu { | |
|
734 | float: right; | |
|
735 | a, button { | |
|
736 | padding: 5px; | |
|
737 | display: block; | |
|
738 | float: left | |
|
739 | } | |
|
740 | } | |
|
741 | .diff-pill { | |
|
742 | display: block; | |
|
743 | float: left; | |
|
744 | padding: @diff-pill-padding; | |
|
745 | } | |
|
746 | .diff-pill-group { | |
|
747 | .diff-pill { | |
|
748 | opacity: .8; | |
|
749 | &:first-child { | |
|
750 | border-radius: @border-radius 0 0 @border-radius; | |
|
751 | } | |
|
752 | &:last-child { | |
|
753 | border-radius: 0 @border-radius @border-radius 0; | |
|
754 | } | |
|
755 | &:only-child { | |
|
756 | border-radius: @border-radius; | |
|
757 | } | |
|
758 | } | |
|
759 | } | |
|
760 | .diff-pill { | |
|
761 | &[op="name"] { | |
|
762 | background: none; | |
|
763 | color: @grey2; | |
|
764 | opacity: 1; | |
|
765 | color: white; | |
|
766 | } | |
|
767 | &[op="limited"] { | |
|
768 | background: @grey2; | |
|
769 | color: white; | |
|
770 | } | |
|
771 | &[op="binary"] { | |
|
772 | background: @color7; | |
|
773 | color: white; | |
|
774 | } | |
|
775 | &[op="modified"] { | |
|
776 | background: @alert1; | |
|
777 | color: white; | |
|
778 | } | |
|
779 | &[op="renamed"] { | |
|
780 | background: @color4; | |
|
781 | color: white; | |
|
782 | } | |
|
783 | &[op="mode"] { | |
|
784 | background: @grey3; | |
|
785 | color: white; | |
|
786 | } | |
|
787 | &[op="symlink"] { | |
|
788 | background: @color8; | |
|
789 | color: white; | |
|
790 | } | |
|
791 | ||
|
792 | &[op="added"] { /* added lines */ | |
|
793 | background: @alert1; | |
|
794 | color: white; | |
|
795 | } | |
|
796 | &[op="deleted"] { /* deleted lines */ | |
|
797 | background: @alert2; | |
|
798 | color: white; | |
|
799 | } | |
|
800 | ||
|
801 | &[op="created"] { /* created file */ | |
|
802 | background: @alert1; | |
|
803 | color: white; | |
|
804 | } | |
|
805 | &[op="removed"] { /* deleted file */ | |
|
806 | background: @color5; | |
|
807 | color: white; | |
|
808 | } | |
|
809 | } | |
|
810 | ||
|
811 | .diff-collapse-button, .diff-expand-button { | |
|
812 | cursor: pointer; | |
|
813 | } | |
|
814 | .diff-collapse-button { | |
|
815 | display: inline; | |
|
816 | } | |
|
817 | .diff-expand-button { | |
|
818 | display: none; | |
|
819 | } | |
|
820 | .diff-collapsed .diff-collapse-button { | |
|
821 | display: none; | |
|
822 | } | |
|
823 | .diff-collapsed .diff-expand-button { | |
|
824 | display: inline; | |
|
825 | } | |
|
826 | } | |
|
650 | 827 | table.cb { |
|
651 | 828 | width: 100%; |
|
652 | 829 | border-collapse: collapse; |
|
653 | margin-bottom: 10px; | |
|
654 | 830 | |
|
655 | * { | |
|
656 | box-sizing: border-box; | |
|
831 | .cb-text { | |
|
832 | padding: @cb-text-padding; | |
|
833 | } | |
|
834 | .cb-hunk { | |
|
835 | padding: @cb-text-padding; | |
|
836 | } | |
|
837 | .cb-expand { | |
|
838 | display: none; | |
|
839 | } | |
|
840 | .cb-collapse { | |
|
841 | display: inline; | |
|
842 | } | |
|
843 | &.cb-collapsed { | |
|
844 | .cb-line { | |
|
845 | display: none; | |
|
846 | } | |
|
847 | .cb-expand { | |
|
848 | display: inline; | |
|
849 | } | |
|
850 | .cb-collapse { | |
|
851 | display: none; | |
|
852 | } | |
|
657 | 853 | } |
|
658 | 854 | |
|
659 | 855 | /* intentionally general selector since .cb-line-selected must override it |
|
660 | 856 | and they both use !important since the td itself may have a random color |
|
661 | 857 | generated by annotation blocks. TLDR: if you change it, make sure |
|
662 | 858 | annotated block selection and line selection in file view still work */ |
|
663 | 859 | .cb-line-fresh .cb-content { |
|
664 | 860 | background: white !important; |
|
665 | 861 | } |
|
862 | .cb-warning { | |
|
863 | background: #fff4dd; | |
|
864 | } | |
|
666 | 865 | |
|
667 | tr.cb-annotate { | |
|
668 | border-top: 1px solid #eee; | |
|
866 | &.cb-diff-sideside { | |
|
867 | td { | |
|
868 | &.cb-content { | |
|
869 | width: 50%; | |
|
870 | } | |
|
871 | } | |
|
872 | } | |
|
669 | 873 | |
|
670 | &+ .cb-line { | |
|
874 | tr { | |
|
875 | &.cb-annotate { | |
|
671 | 876 | border-top: 1px solid #eee; |
|
877 | ||
|
878 | &+ .cb-line { | |
|
879 | border-top: 1px solid #eee; | |
|
880 | } | |
|
881 | ||
|
882 | &:first-child { | |
|
883 | border-top: none; | |
|
884 | &+ .cb-line { | |
|
885 | border-top: none; | |
|
886 | } | |
|
887 | } | |
|
672 | 888 | } |
|
673 | 889 | |
|
674 | &:first-child {

675 | border-top: none; | |
|
676 | &+ .cb-line { | |
|
677 | border-top: none; | |
|
890 | &.cb-hunk { | |
|
891 | font-family: @font-family-monospace; | |
|
892 | color: rgba(0, 0, 0, 0.3); | |
|
893 | ||
|
894 | td { | |
|
895 | &:first-child { | |
|
896 | background: #edf2f9; | |
|
897 | } | |
|
898 | &:last-child { | |
|
899 | background: #f4f7fb; | |
|
900 | } | |
|
678 | 901 | } |
|
679 | 902 | } |
|
680 | 903 | } |
|
681 | 904 | |
|
682 | 905 | td { |
|
683 | 906 | vertical-align: top; |
|
684 | 907 | padding: 0; |
|
685 | 908 | |
|
686 | 909 | &.cb-content { |
|
687 | 910 | font-size: 12.35px; |
|
688 | 911 | |
|
912 | &.cb-line-selected .cb-code { | |
|
913 | background: @comment-highlight-color !important; | |
|
914 | } | |
|
915 | ||
|
689 | 916 | span.cb-code { |
|
690 | 917 | line-height: @cb-line-height; |
|
691 | 918 | padding-left: @cb-line-code-padding; |
|
919 | padding-right: @cb-line-code-padding; | |
|
692 | 920 | display: block; |
|
693 | 921 | white-space: pre-wrap; |
|
694 | 922 | font-family: @font-family-monospace; |
|
695 | 923 | word-break: break-word; |
|
696 | 924 | } |
|
697 | 925 | } |
|
698 | 926 | |
|
699 | 927 | &.cb-lineno { |
|
700 | 928 | padding: 0; |
|
701 | 929 | width: 50px; |
|
702 | 930 | color: rgba(0, 0, 0, 0.3); |
|
703 | 931 | text-align: right; |
|
704 | 932 | border-right: 1px solid #eee; |
|
705 | 933 | font-family: @font-family-monospace; |
|
706 | 934 | |
|
707 | 935 | a::before { |
|
708 | 936 | content: attr(data-line-no); |
|
709 | 937 | } |
|
710 | 938 | &.cb-line-selected a { |
|
711 | 939 | background: @comment-highlight-color !important; |
|
712 | 940 | } |
|
713 | 941 | |
|
714 | 942 | a { |
|
715 | 943 | display: block; |
|
716 | 944 | padding-right: @cb-line-code-padding; |
|
945 | padding-left: @cb-line-code-padding; | |
|
717 | 946 | line-height: @cb-line-height; |
|
718 | 947 | color: rgba(0, 0, 0, 0.3); |
|
719 | 948 | } |
|
720 | 949 | } |
|
721 | 950 | |
|
722 | &.cb-

723 | &.cb-line-selected .cb-code { | |
|
724 | background: @comment-highlight-color !important; | |
|
951 | &.cb-empty { | |
|
952 | background: @grey7; | |
|
953 | } | |
|
954 | ||
|
955 | ins { | |
|
956 | color: black; | |
|
957 | background: #a6f3a6; | |
|
958 | text-decoration: none; | |
|
959 | } | |
|
960 | del { | |
|
961 | color: black; | |
|
962 | background: #f8cbcb; | |
|
963 | text-decoration: none; | |
|
964 | } | |
|
965 | &.cb-addition { | |
|
966 | background: #ecffec; | |
|
967 | ||
|
968 | &.blob-lineno { | |
|
969 | background: #ddffdd; | |
|
970 | } | |
|
971 | } | |
|
972 | &.cb-deletion { | |
|
973 | background: #ffecec; | |
|
974 | ||
|
975 | &.blob-lineno { | |
|
976 | background: #ffdddd; | |
|
725 | 977 | } |
|
726 | 978 | } |
|
727 | 979 | |
|
728 | 980 | &.cb-annotate-info { |
|
729 | 981 | width: 320px; |
|
730 | 982 | min-width: 320px; |
|
731 | 983 | max-width: 320px; |
|
732 | 984 | padding: 5px 2px; |
|
733 | 985 | font-size: 13px; |
|
734 | 986 | |
|
735 | 987 | strong.cb-annotate-message { |
|
736 | 988 | padding: 5px 0; |
|
737 | 989 | white-space: pre-line; |
|
738 | 990 | display: inline-block; |
|
739 | 991 | } |
|
740 | 992 | .rc-user { |
|
741 | 993 | float: none; |
|
742 | 994 | padding: 0 6px 0 17px; |
|
743 | 995 | min-width: auto; |
|
744 | 996 | min-height: auto; |
|
745 | 997 | } |
|
746 | 998 | } |
|
747 | 999 | |
|
748 | 1000 | &.cb-annotate-revision { |
|
749 | 1001 | cursor: pointer; |
|
750 | 1002 | text-align: right; |
|
751 | 1003 | } |
|
752 | 1004 | } |
|
753 | 1005 | } |
@@ -1,476 +1,492 b'' | |||
|
1 | 1 | // # Copyright (C) 2010-2016 RhodeCode GmbH |
|
2 | 2 | // # |
|
3 | 3 | // # This program is free software: you can redistribute it and/or modify |
|
4 | 4 | // # it under the terms of the GNU Affero General Public License, version 3 |
|
5 | 5 | // # (only), as published by the Free Software Foundation. |
|
6 | 6 | // # |
|
7 | 7 | // # This program is distributed in the hope that it will be useful, |
|
8 | 8 | // # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
9 | 9 | // # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
10 | 10 | // # GNU General Public License for more details. |
|
11 | 11 | // # |
|
12 | 12 | // # You should have received a copy of the GNU Affero General Public License |
|
13 | 13 | // # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
14 | 14 | // # |
|
15 | 15 | // # This program is dual-licensed. If you wish to learn more about the |
|
16 | 16 | // # RhodeCode Enterprise Edition, including its added features, Support services, |
|
17 | 17 | // # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
18 | 18 | |
|
19 | 19 | /** |
|
20 | 20 | RhodeCode JS Files |
|
21 | 21 | **/ |
|
22 | 22 | |
|
23 | 23 | if (typeof console == "undefined" || typeof console.log == "undefined"){ |
|
24 | 24 | console = { log: function() {} } |
|
25 | 25 | } |
|
26 | 26 | |
|
27 | 27 | // TODO: move the following function to submodules |
|
28 | 28 | |
|
29 | 29 | /** |
|
30 | 30 | * show more |
|
31 | 31 | */ |
|
32 | 32 | var show_more_event = function(){ |
|
33 | 33 | $('table .show_more').click(function(e) { |
|
34 | 34 | var cid = e.target.id.substring(1); |
|
35 | 35 | var button = $(this); |
|
36 | 36 | if (button.hasClass('open')) { |
|
37 | 37 | $('#'+cid).hide(); |
|
38 | 38 | button.removeClass('open'); |
|
39 | 39 | } else { |
|
40 | 40 | $('#'+cid).show(); |
|
41 | 41 | button.addClass('open one'); |
|
42 | 42 | } |
|
43 | 43 | }); |
|
44 | 44 | }; |
|
45 | 45 | |
|
46 | 46 | var compare_radio_buttons = function(repo_name, compare_ref_type){ |
|
47 | 47 | $('#compare_action').on('click', function(e){ |
|
48 | 48 | e.preventDefault(); |
|
49 | 49 | |
|
50 | 50 | var source = $('input[name=compare_source]:checked').val(); |
|
51 | 51 | var target = $('input[name=compare_target]:checked').val(); |
|
52 | 52 | if(source && target){ |
|
53 | 53 | var url_data = { |
|
54 | 54 | repo_name: repo_name, |
|
55 | 55 | source_ref: source, |
|
56 | 56 | source_ref_type: compare_ref_type, |
|
57 | 57 | target_ref: target, |
|
58 | 58 | target_ref_type: compare_ref_type, |
|
59 | 59 | merge: 1 |
|
60 | 60 | }; |
|
61 | 61 | window.location = pyroutes.url('compare_url', url_data); |
|
62 | 62 | } |
|
63 | 63 | }); |
|
64 | 64 | $('.compare-radio-button').on('click', function(e){ |
|
65 | 65 | var source = $('input[name=compare_source]:checked').val(); |
|
66 | 66 | var target = $('input[name=compare_target]:checked').val(); |
|
67 | 67 | if(source && target){ |
|
68 | 68 | $('#compare_action').removeAttr("disabled"); |
|
69 | 69 | $('#compare_action').removeClass("disabled"); |
|
70 | 70 | } |
|
71 | 71 | }) |
|
72 | 72 | }; |
|
73 | 73 | |
|
74 | 74 | var showRepoSize = function(target, repo_name, commit_id, callback) { |
|
75 | 75 | var container = $('#' + target); |
|
76 | 76 | var url = pyroutes.url('repo_stats', |
|
77 | 77 | {"repo_name": repo_name, "commit_id": commit_id}); |
|
78 | 78 | |
|
79 | 79 | if (!container.hasClass('loaded')) { |
|
80 | 80 | $.ajax({url: url}) |
|
81 | 81 | .complete(function (data) { |
|
82 | 82 | var responseJSON = data.responseJSON; |
|
83 | 83 | container.addClass('loaded'); |
|
84 | 84 | container.html(responseJSON.size); |
|
85 | 85 | callback(responseJSON.code_stats) |
|
86 | 86 | }) |
|
87 | 87 | .fail(function (data) { |
|
88 | 88 | console.log('failed to load repo stats'); |
|
89 | 89 | }); |
|
90 | 90 | } |
|
91 | 91 | |
|
92 | 92 | }; |
|
93 | 93 | |
|
94 | 94 | var showRepoStats = function(target, data){ |
|
95 | 95 | var container = $('#' + target); |
|
96 | 96 | |
|
97 | 97 | if (container.hasClass('loaded')) { |
|
98 | 98 | return |
|
99 | 99 | } |
|
100 | 100 | |
|
101 | 101 | var total = 0; |
|
102 | 102 | var no_data = true; |
|
103 | 103 | var tbl = document.createElement('table'); |
|
104 | 104 | tbl.setAttribute('class', 'trending_language_tbl'); |
|
105 | 105 | |
|
106 | 106 | $.each(data, function(key, val){ |
|
107 | 107 | total += val.count; |
|
108 | 108 | }); |
|
109 | 109 | |
|
110 | 110 | var sortedStats = []; |
|
111 | 111 | for (var obj in data){ |
|
112 | 112 | sortedStats.push([obj, data[obj]]) |
|
113 | 113 | } |
|
114 | 114 | var sortedData = sortedStats.sort(function (a, b) { |
|
115 | 115 | return b[1].count - a[1].count |
|
116 | 116 | }); |
|
117 | 117 | var cnt = 0; |
|
118 | 118 | $.each(sortedData, function(idx, val){ |
|
119 | 119 | cnt += 1; |
|
120 | 120 | no_data = false; |
|
121 | 121 | |
|
122 | 122 | var hide = cnt > 2; |
|
123 | 123 | var tr = document.createElement('tr'); |
|
124 | 124 | if (hide) { |
|
125 | 125 | tr.setAttribute('style', 'display:none'); |
|
126 | 126 | tr.setAttribute('class', 'stats_hidden'); |
|
127 | 127 | } |
|
128 | 128 | |
|
129 | 129 | var key = val[0]; |
|
130 | 130 | var obj = {"desc": val[1].desc, "count": val[1].count}; |
|
131 | 131 | |
|
132 | 132 | var percentage = Math.round(obj.count / total * 100);
|
133 | 133 | |
|
134 | 134 | var td1 = document.createElement('td'); |
|
135 | 135 | td1.width = 300; |
|
136 | 136 | var trending_language_label = document.createElement('div'); |
|
137 | 137 | trending_language_label.innerHTML = obj.desc + " (.{0})".format(key); |
|
138 | 138 | td1.appendChild(trending_language_label); |
|
139 | 139 | |
|
140 | 140 | var td2 = document.createElement('td'); |
|
141 | 141 | var trending_language = document.createElement('div'); |
|
142 | 142 | var nr_files = obj.count +" "+ _ngettext('file', 'files', obj.count); |
|
143 | 143 | |
|
144 | 144 | trending_language.title = key + " " + nr_files; |
|
145 | 145 | |
|
146 | 146 | trending_language.innerHTML = "<span>" + percentage + "% " + nr_files |
|
147 | 147 | + "</span><b>" + percentage + "% " + nr_files + "</b>"; |
|
148 | 148 | |
|
149 | 149 | trending_language.setAttribute("class", 'trending_language'); |
|
150 | 150 | $('b', trending_language)[0].style.width = percentage + "%"; |
|
151 | 151 | td2.appendChild(trending_language); |
|
152 | 152 | |
|
153 | 153 | tr.appendChild(td1); |
|
154 | 154 | tr.appendChild(td2); |
|
155 | 155 | tbl.appendChild(tr); |
|
156 | 156 | if (cnt == 3) { |
|
157 | 157 | var show_more = document.createElement('tr'); |
|
158 | 158 | var td = document.createElement('td'); |
|
159 | 159 | var lnk = document.createElement('a');
|
160 | 160 | |
|
161 | 161 | lnk.href = '#'; |
|
162 | 162 | lnk.innerHTML = _gettext('Show more'); |
|
163 | 163 | lnk.id = 'code_stats_show_more'; |
|
164 | 164 | td.appendChild(lnk); |
|
165 | 165 | |
|
166 | 166 | show_more.appendChild(td); |
|
167 | 167 | show_more.appendChild(document.createElement('td')); |
|
168 | 168 | tbl.appendChild(show_more); |
|
169 | 169 | } |
|
170 | 170 | }); |
|
171 | 171 | |
|
172 | 172 | $(container).html(tbl); |
|
173 | 173 | $(container).addClass('loaded'); |
|
174 | 174 | |
|
175 | 175 | $('#code_stats_show_more').on('click', function (e) { |
|
176 | 176 | e.preventDefault(); |
|
177 | 177 | $('.stats_hidden').each(function (idx) { |
|
178 | 178 | $(this).css("display", ""); |
|
179 | 179 | }); |
|
180 | 180 | $('#code_stats_show_more').hide(); |
|
181 | 181 | }); |
|
182 | 182 | |
|
183 | 183 | }; |
|
184 | 184 | |
|
185 | 185 | |
|
186 | 186 | // Toggle Collapsable Content |
|
187 | 187 | function collapsableContent() { |
|
188 | 188 | |
|
189 | 189 | $('.collapsable-content').not('.no-hide').hide(); |
|
190 | 190 | |
|
191 | 191 | $('.btn-collapse').unbind(); //in case we've been here before |
|
192 | 192 | $('.btn-collapse').click(function() { |
|
193 | 193 | var button = $(this); |
|
194 | 194 | var togglename = $(this).data("toggle"); |
|
195 | 195 | $('.collapsable-content[data-toggle='+togglename+']').toggle(); |
|
196 | 196 | if ($(this).html()=="Show Less") |
|
197 | 197 | $(this).html("Show More"); |
|
198 | 198 | else |
|
199 | 199 | $(this).html("Show Less"); |
|
200 | 200 | }); |
|
201 | 201 | }; |
|
202 | 202 | |
|
203 | 203 | var timeagoActivate = function() { |
|
204 | 204 | $("time.timeago").timeago(); |
|
205 | 205 | }; |
|
206 | 206 | |
|
207 | 207 | // Formatting values in a Select2 dropdown of commit references |
|
208 | 208 | var formatSelect2SelectionRefs = function(commit_ref){ |
|
209 | 209 | var tmpl = ''; |
|
210 | 210 | if (!commit_ref.text || commit_ref.type === 'sha'){ |
|
211 | 211 | return commit_ref.text; |
|
212 | 212 | } |
|
213 | 213 | if (commit_ref.type === 'branch'){ |
|
214 | 214 | tmpl = tmpl.concat('<i class="icon-branch"></i> '); |
|
215 | 215 | } else if (commit_ref.type === 'tag'){ |
|
216 | 216 | tmpl = tmpl.concat('<i class="icon-tag"></i> '); |
|
217 | 217 | } else if (commit_ref.type === 'book'){ |
|
218 | 218 | tmpl = tmpl.concat('<i class="icon-bookmark"></i> '); |
|
219 | 219 | } |
|
220 | 220 | return tmpl.concat(commit_ref.text); |
|
221 | 221 | }; |
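
The formatSelect2SelectionRefs helper above prefixes the label with icon markup chosen by ref type and returns the bare text for SHAs or empty labels. A hedged Python sketch of the same dispatch, reusing the icon class names from the code above:

    ICONS = {
        'branch': '<i class="icon-branch"></i> ',
        'tag': '<i class="icon-tag"></i> ',
        'book': '<i class="icon-bookmark"></i> ',
    }

    def format_selection_ref(text, ref_type):
        # SHAs and empty labels come back untouched, as in the JS version
        if not text or ref_type == 'sha':
            return text
        return ICONS.get(ref_type, '') + text

    assert format_selection_ref('stable', 'branch') == '<i class="icon-branch"></i> stable'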
|
222 | 222 | |
|
223 | 223 | // takes a given html element and scrolls it down offset pixels |
|
224 | function offsetScroll(element, offset){ | |
|
225 | setTimeout(function(){ | |
|
224 | function offsetScroll(element, offset) { | |
|
225 | setTimeout(function() { | |
|
226 | 226 | var location = element.offset().top; |
|
227 | 227 | // some browsers use body, some use html |
|
228 | 228 | $('html, body').animate({ scrollTop: (location - offset) }); |
|
229 | 229 | }, 100); |
|
230 | 230 | } |
|
231 | 231 | |
|
232 | // scroll an element `percent`% from the top of page in `time` ms | |
|
233 | function scrollToElement(element, percent, time) { | |
|
234 | percent = (percent === undefined ? 25 : percent); | |
|
235 | time = (time === undefined ? 100 : time); | |
|
236 | ||
|
237 | var $element = $(element); | |
|
238 | var elOffset = $element.offset().top; | |
|
239 | var elHeight = $element.height(); | |
|
240 | var windowHeight = $(window).height(); | |
|
241 | var offset = elOffset; | |
|
242 | if (elHeight < windowHeight) { | |
|
243 | offset = elOffset - ((windowHeight / (100 / percent)) - (elHeight / 2)); | |
|
244 | } | |
|
245 | setTimeout(function() { | |
|
246 | $('html, body').animate({ scrollTop: offset}); | |
|
247 | }, time); | |
|
248 | } | |
|
249 | ||
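
The new scrollToElement helper above places an element roughly percent% down the viewport: when the element fits on screen, the scroll target is its page offset minus (windowHeight / (100 / percent) - elHeight / 2); a taller-than-window element falls back to its raw offset. A worked check of that arithmetic in Python (numbers illustrative):

    def scroll_offset(el_offset, el_height, window_height, percent=25):
        # same formula as scrollToElement; oversized elements use the raw offset
        if el_height < window_height:
            return el_offset - ((window_height / (100 / percent)) - (el_height / 2))
        return el_offset

    # a 20px-high line on an 800px window, 25% from the top:
    # 5000 - (800 / 4 - 10) = 4810
    assert scroll_offset(5000, 20, 800) == 4810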
|
232 | 250 | /** |
|
233 | 251 | * global hooks after DOM is loaded |
|
234 | 252 | */ |
|
235 | 253 | $(document).ready(function() { |
|
236 | 254 | firefoxAnchorFix(); |
|
237 | 255 | |
|
238 | 256 | $('.navigation a.menulink').on('click', function(e){ |
|
239 | 257 | var menuitem = $(this).parent('li'); |
|
240 | 258 | if (menuitem.hasClass('open')) { |
|
241 | 259 | menuitem.removeClass('open'); |
|
242 | 260 | } else { |
|
243 | 261 | menuitem.addClass('open'); |
|
244 | 262 | $(document).on('click', function(event) { |
|
245 | 263 | if (!$(event.target).closest(menuitem).length) { |
|
246 | 264 | menuitem.removeClass('open'); |
|
247 | 265 | } |
|
248 | 266 | }); |
|
249 | 267 | } |
|
250 | 268 | }); |
|
251 | 269 | $('.compare_view_files').on( |
|
252 | 270 | 'mouseenter mouseleave', 'tr.line .lineno a',function(event) { |
|
253 | 271 | if (event.type === "mouseenter") { |
|
254 | 272 | $(this).parents('tr.line').addClass('hover'); |
|
255 | 273 | } else { |
|
256 | 274 | $(this).parents('tr.line').removeClass('hover'); |
|
257 | 275 | } |
|
258 | 276 | }); |
|
259 | 277 | |
|
260 | 278 | $('.compare_view_files').on( |
|
261 | 279 | 'mouseenter mouseleave', 'tr.line .add-comment-line a',function(event){ |
|
262 | 280 | if (event.type === "mouseenter") { |
|
263 | 281 | $(this).parents('tr.line').addClass('commenting'); |
|
264 | 282 | } else { |
|
265 | 283 | $(this).parents('tr.line').removeClass('commenting'); |
|
266 | 284 | } |
|
267 | 285 | }); |
|
268 | 286 | |
|
269 | 287 | $('body').on( /* TODO: replace the $('.compare_view_files').on('click') below |
|
270 | 288 | when new diffs are integrated */ |
|
271 | 289 | 'click', '.cb-lineno a', function(event) { |
|
272 | 290 | |
|
273 | 291 | if ($(this).attr('data-line-no') !== ""){ |
|
274 | 292 | $('.cb-line-selected').removeClass('cb-line-selected'); |
|
275 | 293 | var td = $(this).parent(); |
|
276 | 294 | td.addClass('cb-line-selected'); // line number td |
|
277 | 295 | td.next().addClass('cb-line-selected'); // line content td |
|
278 | 296 | |
|
279 | 297 | // Replace URL without jumping to it if browser supports. |
|
280 | 298 | // Default otherwise |
|
281 | 299 | if (history.pushState) { |
|
282 | 300 | var new_location = location.href.rstrip('#'); |
|
283 | 301 | if (location.hash) { |
|
284 | 302 | new_location = new_location.replace(location.hash, ""); |
|
285 | 303 | } |
|
286 | 304 | |
|
287 | 305 | // Make new anchor url |
|
288 | 306 | new_location = new_location + $(this).attr('href'); |
|
289 | 307 | history.pushState(true, document.title, new_location); |
|
290 | 308 | |
|
291 | 309 | return false; |
|
292 | 310 | } |
|
293 | 311 | } |
|
294 | 312 | }); |
|
295 | 313 | |
|
296 | 314 | $('.compare_view_files').on( /* TODO: replace this with .cb function above |
|
297 | 315 | when new diffs are integrated */ |
|
298 | 316 | 'click', 'tr.line .lineno a',function(event) { |
|
299 | 317 | if ($(this).text() != ""){ |
|
300 | 318 | $('tr.line').removeClass('selected'); |
|
301 | 319 | $(this).parents("tr.line").addClass('selected'); |
|
302 | 320 | |
|
303 | 321 | // Replace URL without jumping to it if browser supports. |
|
304 | 322 | // Default otherwise |
|
305 | 323 | if (history.pushState) { |
|
306 | 324 | var new_location = location.href; |
|
307 | 325 | if (location.hash){ |
|
308 | 326 | new_location = new_location.replace(location.hash, ""); |
|
309 | 327 | } |
|
310 | 328 | |
|
311 | 329 | // Make new anchor url |
|
312 | 330 | var new_location = new_location+$(this).attr('href'); |
|
313 | 331 | history.pushState(true, document.title, new_location); |
|
314 | 332 | |
|
315 | 333 | return false; |
|
316 | 334 | } |
|
317 | 335 | } |
|
318 | 336 | }); |
|
319 | 337 | |
|
320 | 338 | $('.compare_view_files').on( |
|
321 | 339 | 'click', 'tr.line .add-comment-line a',function(event) { |
|
322 | 340 | var tr = $(event.currentTarget).parents('tr.line')[0]; |
|
323 | 341 | injectInlineForm(tr); |
|
324 | 342 | return false; |
|
325 | 343 | }); |
|
326 | 344 | |
|
327 | 345 | $('.collapse_file').on('click', function(e) { |
|
328 | 346 | e.stopPropagation(); |
|
329 | 347 | if ($(e.target).is('a')) { return; } |
|
330 | 348 | var node = $(e.delegateTarget).first(); |
|
331 | 349 | var icon = $($(node.children().first()).children().first()); |
|
332 | 350 | var id = node.attr('fid'); |
|
333 | 351 | var target = $('#'+id); |
|
334 | 352 | var tr = $('#tr_'+id); |
|
335 | 353 | var diff = $('#diff_'+id); |
|
336 | 354 | if(node.hasClass('expand_file')){ |
|
337 | 355 | node.removeClass('expand_file'); |
|
338 | 356 | icon.removeClass('expand_file_icon'); |
|
339 | 357 | node.addClass('collapse_file'); |
|
340 | 358 | icon.addClass('collapse_file_icon'); |
|
341 | 359 | diff.show(); |
|
342 | 360 | tr.show(); |
|
343 | 361 | target.show(); |
|
344 | 362 | } else { |
|
345 | 363 | node.removeClass('collapse_file'); |
|
346 | 364 | icon.removeClass('collapse_file_icon'); |
|
347 | 365 | node.addClass('expand_file'); |
|
348 | 366 | icon.addClass('expand_file_icon'); |
|
349 | 367 | diff.hide(); |
|
350 | 368 | tr.hide(); |
|
351 | 369 | target.hide(); |
|
352 | 370 | } |
|
353 | 371 | }); |
|
354 | 372 | |
|
355 | 373 | $('#expand_all_files').click(function() { |
|
356 | 374 | $('.expand_file').each(function() { |
|
357 | 375 | var node = $(this); |
|
358 | 376 | var icon = $($(node.children().first()).children().first()); |
|
359 | 377 | var id = $(this).attr('fid'); |
|
360 | 378 | var target = $('#'+id); |
|
361 | 379 | var tr = $('#tr_'+id); |
|
362 | 380 | var diff = $('#diff_'+id); |
|
363 | 381 | node.removeClass('expand_file'); |
|
364 | 382 | icon.removeClass('expand_file_icon'); |
|
365 | 383 | node.addClass('collapse_file'); |
|
366 | 384 | icon.addClass('collapse_file_icon'); |
|
367 | 385 | diff.show(); |
|
368 | 386 | tr.show(); |
|
369 | 387 | target.show(); |
|
370 | 388 | }); |
|
371 | 389 | }); |
|
372 | 390 | |
|
373 | 391 | $('#collapse_all_files').click(function() { |
|
374 | 392 | $('.collapse_file').each(function() { |
|
375 | 393 | var node = $(this); |
|
376 | 394 | var icon = $($(node.children().first()).children().first()); |
|
377 | 395 | var id = $(this).attr('fid'); |
|
378 | 396 | var target = $('#'+id); |
|
379 | 397 | var tr = $('#tr_'+id); |
|
380 | 398 | var diff = $('#diff_'+id); |
|
381 | 399 | node.removeClass('collapse_file'); |
|
382 | 400 | icon.removeClass('collapse_file_icon'); |
|
383 | 401 | node.addClass('expand_file'); |
|
384 | 402 | icon.addClass('expand_file_icon'); |
|
385 | 403 | diff.hide(); |
|
386 | 404 | tr.hide(); |
|
387 | 405 | target.hide(); |
|
388 | 406 | }); |
|
389 | 407 | }); |
|
390 | 408 | |
|
391 | 409 | // Mouse over behavior for comments and line selection |
|
392 | 410 | |
|
393 | 411 | // Select the line that comes from the url anchor |
|
394 | 412 | // At the time of development, Chrome didn't seem to support jQuery's :target |

395 | 413 | // selector, so I had to scroll manually |
|
396 | 414 | |
|
397 | 415 | if (location.hash) { /* TODO: dan: remove this and replace with code block |
|
398 | 416 | below when new diffs are ready */ |
|
399 | 417 | var result = splitDelimitedHash(location.hash); |
|
400 | 418 | var loc = result.loc; |
|
401 | 419 | if (loc.length > 1){ |
|
402 | 420 | var lineno = $(loc+'.lineno'); |
|
403 | 421 | if (lineno.length > 0){ |
|
404 | 422 | var tr = lineno.parents('tr.line'); |
|
405 | 423 | tr.addClass('selected'); |
|
406 | 424 | |
|
407 | 425 | tr[0].scrollIntoView(); |
|
408 | 426 | |
|
409 | 427 | $.Topic('/ui/plugins/code/anchor_focus').prepareOrPublish({ |
|
410 | 428 | tr: tr, |
|
411 | 429 | remainder: result.remainder}); |
|
412 | 430 | } |
|
413 | 431 | } |
|
414 | 432 | } |
|
415 | 433 | |
|
416 | 434 | if (location.hash) { /* TODO: dan: use this to replace the code block above |
|
417 | 435 | when new diffs are ready */ |
|
418 | 436 | var result = splitDelimitedHash(location.hash); |
|
419 | 437 | var loc = result.loc; |
|
420 | 438 | if (loc.length > 1) { |
|
439 | ||
|
440 | var highlightable_line_tds = []; | |
|
441 | ||
|
442 | // source code line format | |
|
421 | 443 | var page_highlights = loc.substring( |
|
422 | 444 | loc.indexOf('#') + 1).split('L'); |
|
423 | 445 | |
|
424 | 446 | if (page_highlights.length > 1) { |
|
425 | 447 | var highlight_ranges = page_highlights[1].split(","); |
|
426 | 448 | var h_lines = []; |
|
427 | 449 | for (var pos in highlight_ranges) { |
|
428 | 450 | var _range = highlight_ranges[pos].split('-'); |
|
429 | 451 | if (_range.length === 2) { |
|
430 | 452 | var start = parseInt(_range[0]); |
|
431 | 453 | var end = parseInt(_range[1]); |
|
432 | 454 | if (start < end) { |
|
433 | 455 | for (var i = start; i <= end; i++) { |
|
434 | 456 | h_lines.push(i); |
|
435 | 457 | } |
|
436 | 458 | } |
|
437 | 459 | } |
|
438 | 460 | else { |
|
439 | 461 | h_lines.push(parseInt(highlight_ranges[pos])); |
|
440 | 462 | } |
|
441 | 463 | } |
|
442 | 464 | for (pos in h_lines) { |
|
443 | 465 | var line_td = $('td.cb-lineno#L' + h_lines[pos]); |
|
444 | 466 | if (line_td.length) { |
|
445 | line_td.addClass('cb-line-selected'); // line number td |

446 | line_td.next().addClass('cb-line-selected'); // line content | |
|
467 | highlightable_line_tds.push(line_td); | |
|
447 | 468 | } |
|
448 | 469 | } |
|
449 | var first_line_td = $('td.cb-lineno#L' + h_lines[0]); | |
|
450 | if (first_line_td.length) { | |
|
451 | var elOffset = first_line_td.offset().top; | |
|
452 | var elHeight = first_line_td.height(); | |
|
453 | var windowHeight = $(window).height(); | |
|
454 | var offset; | |
|
470 | } | |
|
455 | 471 | |
|
456 | if (elHeight < windowHeight) { | |
|
457 | offset = elOffset - ((windowHeight / 4) - (elHeight / 2)); | |
|
458 | } | |
|
459 | else { |

460 | offset = elOffset; | |
|
461 | } | |
|
462 | $(function() { // let browser scroll to hash first, then | |
|
463 | // scroll the line to the middle of page | |
|
464 | setTimeout(function() { | |
|
465 | $('html, body').animate({ scrollTop: offset }); | |
|
466 | }, 100); | |
|
467 | }); | |
|
468 | $.Topic('/ui/plugins/code/anchor_focus').prepareOrPublish({ |

469 | lineno: first_line_td, |

470 | remainder: result.remainder}); |
|
471 | } | |
|
472 | // now check a direct id reference (diff page) | |
|
473 | if ($(loc).length && $(loc).hasClass('cb-lineno')) { | |
|
474 | highlightable_line_tds.push($(loc)); | |
|
475 | } | |
|
476 | $.each(highlightable_line_tds, function (i, $td) { | |
|
477 | $td.addClass('cb-line-selected'); // line number td | |
|
478 | $td.next().addClass('cb-line-selected'); // line content | |
|
479 | }); | |
|
480 | ||
|
481 | if (highlightable_line_tds.length) { | |
|
482 | var $first_line_td = highlightable_line_tds[0]; | |
|
483 | scrollToElement($first_line_td); | |
|
484 | $.Topic('/ui/plugins/code/anchor_focus').prepareOrPublish({ | |
|
485 | lineno: $first_line_td, | |
|
486 | remainder: result.remainder | |
|
487 | }); | |
|
472 | 488 | } |
|
473 | 489 | } |
|
474 | 490 | } |
|
475 | 491 | collapsableContent(); |
|
476 | 492 | }); |
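
The highlight block above expands an anchor such as #L5-8,12 into individual line numbers before tagging the matching td.cb-lineno cells and scrolling the first one into view. A Python sketch of that expansion, assuming the same anchor grammar as the code above:

    def expand_line_anchor(anchor):
        # "#L5-8,12" -> [5, 6, 7, 8, 12]; mirrors the chained
        # split('L') / split(',') / split('-') logic above
        lines = []
        spec = anchor.partition('L')[2]
        for part in spec.split(','):
            lo, sep, hi = part.partition('-')
            if sep and int(lo) < int(hi):
                lines.extend(range(int(lo), int(hi) + 1))
            else:
                lines.append(int(lo))
        return lines

    assert expand_line_anchor('#L5-8,12') == [5, 6, 7, 8, 12]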
@@ -1,322 +1,258 @@
|
1 | 1 | ## -*- coding: utf-8 -*- |
|
2 | 2 | <%inherit file="/base/base.html"/> |
|
3 | <%namespace name="cbdiffs" file="/codeblocks/diffs.html"/> | |
|
3 | 4 | |
|
4 | 5 | <%def name="title()"> |
|
5 | 6 | %if c.compare_home: |
|
6 | 7 | ${_('%s Compare') % c.repo_name} |
|
7 | 8 | %else: |
|
8 | 9 | ${_('%s Compare') % c.repo_name} - ${'%s@%s' % (c.source_repo.repo_name, c.source_ref)} > ${'%s@%s' % (c.target_repo.repo_name, c.target_ref)} |
|
9 | 10 | %endif |
|
10 | 11 | %if c.rhodecode_name: |
|
11 | 12 | · ${h.branding(c.rhodecode_name)} |
|
12 | 13 | %endif |
|
13 | 14 | </%def> |
|
14 | 15 | |
|
15 | 16 | <%def name="breadcrumbs_links()"> |
|
16 | 17 | ${ungettext('%s commit','%s commits', len(c.commit_ranges)) % len(c.commit_ranges)} |
|
17 | 18 | </%def> |
|
18 | 19 | |
|
19 | 20 | <%def name="menu_bar_nav()"> |
|
20 | 21 | ${self.menu_items(active='repositories')} |
|
21 | 22 | </%def> |
|
22 | 23 | |
|
23 | 24 | <%def name="menu_bar_subnav()"> |
|
24 | 25 | ${self.repo_menu(active='compare')} |
|
25 | 26 | </%def> |
|
26 | 27 | |
|
27 | 28 | <%def name="main()"> |
|
28 | 29 | <script type="text/javascript"> |
|
29 | 30 | // set fake commitId on this commit-range page |
|
30 | 31 | templateContext.commit_data.commit_id = "${h.EmptyCommit().raw_id}"; |
|
31 | 32 | </script> |
|
32 | 33 | |
|
33 | 34 | <div class="box"> |
|
34 | 35 | <div class="title"> |
|
35 | 36 | ${self.repo_page_title(c.rhodecode_db_repo)} |
|
36 | 37 | <div class="breadcrumbs"> |
|
37 | 38 | ${_('Compare Commits')} |
|
38 | 39 | </div> |
|
39 | 40 | </div> |
|
40 | 41 | |
|
41 | 42 | <div class="table"> |
|
42 | 43 | <div id="codeblock" class="diffblock"> |
|
43 | 44 | <div class="code-header" > |
|
44 | 45 | <div class="compare_header"> |
|
45 | 46 | ## The hidden elements are replaced with a select2 widget |
|
46 | 47 | <div class="compare-label">${_('Target')}</div>${h.hidden('compare_source')} |
|
47 | 48 | <div class="compare-label">${_('Source')}</div>${h.hidden('compare_target')} |
|
48 | 49 | |
|
49 | 50 | %if not c.preview_mode: |
|
50 | 51 | <div class="compare-label"></div> |
|
51 | 52 | <div class="compare-buttons"> |
|
52 | 53 | %if not c.compare_home: |
|
53 | 54 | <a id="btn-swap" class="btn btn-primary" href="${c.swap_url}"><i class="icon-refresh"></i> ${_('Swap')}</a> |
|
54 | 55 | %endif |
|
55 | 56 | <div id="compare_revs" class="btn btn-primary"><i class ="icon-loop"></i> ${_('Compare Commits')}</div> |
|
56 | %if c.files: | |
|
57 | %if c.diffset and c.diffset.files: | |
|
57 | 58 | <div id="compare_changeset_status_toggle" class="btn btn-primary">${_('Comment')}</div> |
|
58 | 59 | %endif |
|
59 | 60 | </div> |
|
60 | 61 | %endif |
|
61 | 62 | </div> |
|
62 | 63 | </div> |
|
63 | 64 | </div> |
|
64 | 65 | ## use a JS script to load these quickly, before potentially large diffs take a long time to render |
|
65 | 66 | ## this prevents a situation where large diffs block the rendering of the select2 fields |
|
66 | 67 | <script type="text/javascript"> |
|
67 | 68 | |
|
68 | 69 | var cache = {}; |
|
69 | 70 | |
|
70 | 71 | var formatSelection = function(repoName){ |
|
71 | 72 | return function(data, container, escapeMarkup) { |
|
72 | 73 | var selection = data ? this.text(data) : ""; |
|
73 | 74 | return escapeMarkup('{0}@{1}'.format(repoName, selection)); |
|
74 | 75 | } |
|
75 | 76 | }; |
|
76 | 77 | |
|
77 | 78 | var feedCompareData = function(query, cachedValue){ |
|
78 | 79 | var data = {results: []}; |
|
79 | 80 | //filter results |
|
80 | 81 | $.each(cachedValue.results, function() { |
|
81 | 82 | var section = this.text; |
|
82 | 83 | var children = []; |
|
83 | 84 | $.each(this.children, function() { |
|
84 | 85 | if (query.term.length === 0 || this.text.toUpperCase().indexOf(query.term.toUpperCase()) >= 0) { |
|
85 | 86 | children.push({ |
|
86 | 87 | 'id': this.id, |
|
87 | 88 | 'text': this.text, |
|
88 | 89 | 'type': this.type |
|
89 | 90 | }) |
|
90 | 91 | } |
|
91 | 92 | }); |
|
92 | 93 | data.results.push({ |
|
93 | 94 | 'text': section, |
|
94 | 95 | 'children': children |
|
95 | 96 | }) |
|
96 | 97 | }); |
|
97 | 98 | //push the typed in changeset |
|
98 | 99 | data.results.push({ |
|
99 | 100 | 'text': _gettext('specify commit'), |
|
100 | 101 | 'children': [{ |
|
101 | 102 | 'id': query.term, |
|
102 | 103 | 'text': query.term, |
|
103 | 104 | 'type': 'rev' |
|
104 | 105 | }] |
|
105 | 106 | }); |
|
106 | 107 | query.callback(data); |
|
107 | 108 | }; |
|
108 | 109 | |
|
109 | 110 | var loadCompareData = function(repoName, query, cache){ |
|
110 | 111 | $.ajax({ |
|
111 | 112 | url: pyroutes.url('repo_refs_data', {'repo_name': repoName}), |
|
112 | 113 | data: {}, |
|
113 | 114 | dataType: 'json', |
|
114 | 115 | type: 'GET', |
|
115 | 116 | success: function(data) { |
|
116 | 117 | cache[repoName] = data; |
|
117 | 118 | query.callback({results: data.results}); |
|
118 | 119 | } |
|
119 | 120 | }) |
|
120 | 121 | }; |
|
121 | 122 | |
|
122 | 123 | var enable_fields = ${"false" if c.preview_mode else "true"}; |
|
123 | 124 | $("#compare_source").select2({ |
|
124 | 125 | placeholder: "${'%s@%s' % (c.source_repo.repo_name, c.source_ref)}", |
|
125 | 126 | containerCssClass: "drop-menu", |
|
126 | 127 | dropdownCssClass: "drop-menu-dropdown", |
|
127 | 128 | formatSelection: formatSelection("${c.source_repo.repo_name}"), |
|
128 | 129 | dropdownAutoWidth: true, |
|
129 | 130 | query: function(query) { |
|
130 | 131 | var repoName = '${c.source_repo.repo_name}'; |
|
131 | 132 | var cachedValue = cache[repoName]; |
|
132 | 133 | |
|
133 | 134 | if (cachedValue){ |
|
134 | 135 | feedCompareData(query, cachedValue); |
|
135 | 136 | } |
|
136 | 137 | else { |
|
137 | 138 | loadCompareData(repoName, query, cache); |
|
138 | 139 | } |
|
139 | 140 | } |
|
140 | 141 | }).select2("enable", enable_fields); |
|
141 | 142 | |
|
142 | 143 | $("#compare_target").select2({ |
|
143 | 144 | placeholder: "${'%s@%s' % (c.target_repo.repo_name, c.target_ref)}", |
|
144 | 145 | dropdownAutoWidth: true, |
|
145 | 146 | containerCssClass: "drop-menu", |
|
146 | 147 | dropdownCssClass: "drop-menu-dropdown", |
|
147 | 148 | formatSelection: formatSelection("${c.target_repo.repo_name}"), |
|
148 | 149 | query: function(query) { |
|
149 | 150 | var repoName = '${c.target_repo.repo_name}'; |
|
150 | 151 | var cachedValue = cache[repoName]; |
|
151 | 152 | |
|
152 | 153 | if (cachedValue){ |
|
153 | 154 | feedCompareData(query, cachedValue); |
|
154 | 155 | } |
|
155 | 156 | else { |
|
156 | 157 | loadCompareData(repoName, query, cache); |
|
157 | 158 | } |
|
158 | 159 | } |
|
159 | 160 | }).select2("enable", enable_fields); |
|
160 | 161 | var initial_compare_source = {id: "${c.source_ref}", type:"${c.source_ref_type}"}; |
|
161 | 162 | var initial_compare_target = {id: "${c.target_ref}", type:"${c.target_ref_type}"}; |
|
162 | 163 | |
|
163 | 164 | $('#compare_revs').on('click', function(e) { |
|
164 | 165 | var source = $('#compare_source').select2('data') || initial_compare_source; |
|
165 | 166 | var target = $('#compare_target').select2('data') || initial_compare_target; |
|
166 | 167 | if (source && target) { |
|
167 | 168 | var url_data = { |
|
168 | 169 | repo_name: "${c.repo_name}", |
|
169 | 170 | source_ref: source.id, |
|
170 | 171 | source_ref_type: source.type, |
|
171 | 172 | target_ref: target.id, |
|
172 | 173 | target_ref_type: target.type |
|
173 | 174 | }; |
|
174 | 175 | window.location = pyroutes.url('compare_url', url_data); |
|
175 | 176 | } |
|
176 | 177 | }); |
|
177 | 178 | $('#compare_changeset_status_toggle').on('click', function(e) { |
|
178 | 179 | $('#compare_changeset_status').toggle(); |
|
179 | 180 | }); |
|
180 | 181 | |
|
181 | 182 | </script> |
|
182 | 183 | |
|
183 | 184 | ## changeset status form |
|
184 | 185 | <%namespace name="comment" file="/changeset/changeset_file_comment.html"/> |
|
185 | 186 | ## main comment form and its status |
|
186 | 187 | <% |
|
187 | 188 | def revs(_revs): |
|
188 | 189 | form_inputs = [] |
|
189 | 190 | for cs in _revs: |
|
190 | 191 | tmpl = '<input type="hidden" data-commit-id="%(cid)s" name="commit_ids" value="%(cid)s">' % {'cid': cs.raw_id} |
|
191 | 192 | form_inputs.append(tmpl) |
|
192 | 193 | return form_inputs |
|
193 | 194 | %> |
|
194 | 195 | <div id="compare_changeset_status" style="display: none;"> |
|
195 | 196 | ${comment.comments(h.url('changeset_comment', repo_name=c.repo_name, revision='0'*16), None, is_compare=True, form_extras=revs(c.commit_ranges))} |
|
196 | 197 | <script type="text/javascript"> |
|
197 | 198 | |
|
198 | 199 | mainCommentForm.setHandleFormSubmit(function(o) { |
|
199 | 200 | var text = mainCommentForm.cm.getValue(); |
|
200 | 201 | var status = mainCommentForm.getCommentStatus(); |
|
201 | 202 | |
|
202 | 203 | if (text === "" && !status) { |
|
203 | 204 | return; |
|
204 | 205 | } |
|
205 | 206 | |
|
206 | 207 | // we can pick which commits we want to comment on by |

207 | 208 | // selecting them via click in the preview pane; this alters the hidden inputs |
|
208 | 209 | var cherryPicked = $('#changeset_compare_view_content .compare_select.hl').length > 0; |
|
209 | 210 | |
|
210 | 211 | var commitIds = []; |
|
211 | 212 | $('#changeset_compare_view_content .compare_select').each(function(el) { |
|
212 | 213 | var commitId = this.id.replace('row-', ''); |
|
213 | 214 | if ($(this).hasClass('hl') || !cherryPicked) { |
|
214 | 215 | $("input[data-commit-id='{0}']".format(commitId)).val(commitId) |
|
215 | 216 | commitIds.push(commitId); |
|
216 | 217 | } else { |
|
217 | 218 | $("input[data-commit-id='{0}']".format(commitId)).val('') |
|
218 | 219 | } |
|
219 | 220 | }); |
|
220 | 221 | |
|
221 | 222 | mainCommentForm.setActionButtonsDisabled(true); |
|
222 | 223 | mainCommentForm.cm.setOption("readOnly", true); |
|
223 | 224 | var postData = { |
|
224 | 225 | 'text': text, |
|
225 | 226 | 'changeset_status': status, |
|
226 | 227 | 'commit_ids': commitIds, |
|
227 | 228 | 'csrf_token': CSRF_TOKEN |
|
228 | 229 | }; |
|
229 | 230 | |
|
230 | 231 | var submitSuccessCallback = function(o) { |
|
231 | 232 | location.reload(true); |
|
232 | 233 | }; |
|
233 | 234 | var submitFailCallback = function(){ |
|
234 | 235 | mainCommentForm.resetCommentFormState(text) |
|
235 | 236 | }; |
|
236 | 237 | mainCommentForm.submitAjaxPOST( |
|
237 | 238 | mainCommentForm.submitUrl, postData, submitSuccessCallback, submitFailCallback); |
|
238 | 239 | }); |
|
239 | 240 | </script> |
|
240 | 241 | |
|
241 | 242 | </div> |
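
The submit handler above applies an all-or-selected rule: if any commit row in the preview pane carries the 'hl' class, only those commit ids are posted; with no selection, every commit in the range is included. A Python sketch of that rule (data shape assumed):

    def commits_to_post(rows):
        # rows is a list of (commit_id, is_highlighted) pairs
        cherry_picked = any(hl for _, hl in rows)
        return [cid for cid, hl in rows if hl or not cherry_picked]

    assert commits_to_post([('a', False), ('b', True)]) == ['b']
    assert commits_to_post([('a', False), ('b', False)]) == ['a', 'b']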
|
242 | 243 | |
|
243 | 244 | %if c.compare_home: |
|
244 | 245 | <div id="changeset_compare_view_content"> |
|
245 | 246 | <div class="help-block">${_('Compare commits, branches, bookmarks or tags.')}</div> |
|
246 | 247 | </div> |
|
247 | 248 | %else: |
|
248 | 249 | <div id="changeset_compare_view_content"> |
|
249 | 250 | ##CS |
|
250 | 251 | <%include file="compare_commits.html"/> |
|
251 | ||
|
252 | ## FILES | |
|
253 | <div class="cs_files_title"> | |
|
254 | <span class="cs_files_expand"> | |
|
255 | <span id="expand_all_files">${_('Expand All')}</span> | <span id="collapse_all_files">${_('Collapse All')}</span> | |
|
256 | </span> | |
|
257 | <h2> | |
|
258 | ${diff_block.diff_summary_text(len(c.files), c.lines_added, c.lines_deleted, c.limited_diff)} | |
|
259 | </h2> | |
|
260 | </div> | |
|
261 | <div class="cs_files"> | |
|
262 | %if not c.files: | |
|
263 | <p class="empty_data">${_('No files')}</p> | |
|
264 | %endif | |
|
265 | <table class="compare_view_files"> | |
|
266 | <%namespace name="diff_block" file="/changeset/diff_block.html"/> | |
|
267 | %for FID, change, path, stats, file in c.files: | |
|
268 | <tr class="cs_${change} collapse_file" fid="${FID}"> | |
|
269 | <td class="cs_icon_td"> | |
|
270 | <span class="collapse_file_icon" fid="${FID}"></span> | |
|
271 | </td> | |
|
272 | <td class="cs_icon_td"> | |
|
273 | <div class="flag_status not_reviewed hidden"></div> | |
|
274 | </td> | |
|
275 | <td class="cs_${change}" id="a_${FID}"> | |
|
276 | <div class="node"> | |
|
277 | <a href="#a_${FID}"> | |
|
278 | <i class="icon-file-${change.lower()}"></i> | |
|
279 | ${h.safe_unicode(path)} | |
|
280 | </a> | |
|
281 | </div> | |
|
282 | </td> | |
|
283 | <td> | |
|
284 | <div class="changes pull-right">${h.fancy_file_stats(stats)}</div> | |
|
285 | <div class="comment-bubble pull-right" data-path="${path}"> | |
|
286 | <i class="icon-comment"></i> | |
|
287 | </div> | |
|
288 | </td> | |
|
289 | </tr> | |
|
290 | <tr fid="${FID}" id="diff_${FID}" class="diff_links"> | |
|
291 | <td></td> | |
|
292 | <td></td> | |
|
293 | <td class="cs_${change}"> | |
|
294 | %if c.target_repo.repo_name == c.repo_name: | |
|
295 | ${diff_block.diff_menu(c.repo_name, h.safe_unicode(path), c.source_ref, c.target_ref, change, file)} | |
|
296 | %else: | |
|
297 | ## this is a slightly different case, since the target repo can have this |

298 | ## file in a different state than the source repo |
|
299 | ${diff_block.diff_menu(c.target_repo.repo_name, h.safe_unicode(path), c.source_ref, c.target_ref, change, file)} | |
|
300 | %endif | |
|
301 | </td> | |
|
302 | <td class="td-actions rc-form"> | |
|
303 | </td> | |
|
304 | </tr> | |
|
305 | <tr id="tr_${FID}"> | |
|
306 | <td></td> | |
|
307 | <td></td> | |
|
308 | <td class="injected_diff" colspan="2"> | |
|
309 | ${diff_block.diff_block_simple([c.changes[FID]])} | |
|
310 | </td> | |
|
311 | </tr> | |
|
312 | %endfor | |
|
313 | </table> | |
|
314 | % if c.limited_diff: | |
|
315 | ${diff_block.changeset_message()} | |
|
316 | % endif | |
|
252 | ${cbdiffs.render_diffset(c.diffset)} | |
|
317 | 253 | </div> |
|
318 | 254 | %endif |
|
319 | 255 | </div> |
|
320 | 256 | </div> |
|
321 | 257 | </div> |
|
322 | 258 | </%def> |
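
The ref selectors in this template follow a cache-or-fetch pattern: the first query per repository hits repo_refs_data and caches the payload, and later keystrokes filter the cached sections client-side, always appending a 'specify commit' section that echoes the typed term. A hedged Python sketch of the filtering step:

    def filter_refs(cached_sections, term):
        # mirrors feedCompareData: case-insensitive substring match per
        # section, plus a trailing section offering the raw term as a 'rev'
        results = []
        for section in cached_sections:
            children = [c for c in section['children']
                        if not term or term.upper() in c['text'].upper()]
            results.append({'text': section['text'], 'children': children})
        results.append({'text': 'specify commit',
                        'children': [{'id': term, 'text': term, 'type': 'rev'}]})
        return results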
@@ -1,286 +1,299 @@
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2010-2016 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | import pytest |
|
22 | 22 | |
|
23 | 23 | from rhodecode.lib.helpers import _shorten_commit_id |
|
24 | 24 | from rhodecode.tests import url |
|
25 | 25 | |
|
26 | 26 | |
|
27 | 27 | @pytest.mark.usefixtures("app") |
|
28 | 28 | class TestChangesetController(object): |
|
29 | 29 | |
|
30 | 30 | def test_index(self, backend): |
|
31 | 31 | commit_id = self.commit_id[backend.alias] |
|
32 | 32 | response = self.app.get(url( |
|
33 | 33 | controller='changeset', action='index', |
|
34 | 34 | repo_name=backend.repo_name, revision=commit_id)) |
|
35 | 35 | response.mustcontain('Added a symlink') |
|
36 | 36 | response.mustcontain(commit_id) |
|
37 | 37 | response.mustcontain('No newline at end of file') |
|
38 | 38 | |
|
39 | 39 | def test_index_raw(self, backend): |
|
40 | 40 | commit_id = self.commit_id[backend.alias] |
|
41 | 41 | response = self.app.get(url( |
|
42 | 42 | controller='changeset', action='changeset_raw', |
|
43 | 43 | repo_name=backend.repo_name, revision=commit_id)) |
|
44 | 44 | assert response.body == self.diffs[backend.alias] |
|
45 | 45 | |
|
46 | 46 | def test_index_raw_patch(self, backend): |
|
47 | 47 | response = self.app.get(url( |
|
48 | 48 | controller='changeset', action='changeset_patch', |
|
49 | 49 | repo_name=backend.repo_name, |
|
50 | 50 | revision=self.commit_id[backend.alias])) |
|
51 | 51 | assert response.body == self.patches[backend.alias] |
|
52 | 52 | |
|
53 | 53 | def test_index_changeset_download(self, backend): |
|
54 | 54 | response = self.app.get(url( |
|
55 | 55 | controller='changeset', action='changeset_download', |
|
56 | 56 | repo_name=backend.repo_name, |
|
57 | 57 | revision=self.commit_id[backend.alias])) |
|
58 | 58 | assert response.body == self.diffs[backend.alias] |
|
59 | 59 | |
|
60 | 60 | def test_single_commit_page_different_ops(self, backend): |
|
61 | 61 | commit_id = { |
|
62 | 62 | 'hg': '603d6c72c46d953420c89d36372f08d9f305f5dd', |
|
63 | 63 | 'git': '03fa803d7e9fb14daa9a3089e0d1494eda75d986', |
|
64 | 64 | 'svn': '337', |
|
65 | 65 | } |
|
66 | 66 | commit_id = commit_id[backend.alias] |
|
67 | 67 | response = self.app.get(url( |
|
68 | 68 | controller='changeset', action='index', |
|
69 | 69 | repo_name=backend.repo_name, revision=commit_id)) |
|
70 | 70 | |
|
71 | 71 | response.mustcontain(_shorten_commit_id(commit_id)) |
|
72 | 72 | response.mustcontain('21 files changed: 943 inserted, 288 deleted') |
|
73 | 73 | |
|
74 | 74 | # file operations |
|
75 | 75 | response.mustcontain('File no longer present at commit: %s' % |
|
76 | 76 | _shorten_commit_id(commit_id)) |
|
77 | 77 | |
|
78 | 78 | # svn uses a different filename |
|
79 | 79 | if backend.alias == 'svn': |
|
80 | 80 | response.mustcontain('new file 10644') |
|
81 | 81 | else: |
|
82 | 82 | response.mustcontain('new file 100644') |
|
83 | 83 | response.mustcontain('Changed theme to ADC theme') # commit msg |
|
84 | 84 | |
|
85 | 85 | self._check_diff_menus(response, right_menu=True) |
|
86 | 86 | |
|
87 | 87 | def test_commit_range_page_different_ops(self, backend): |
|
88 | 88 | commit_id_range = { |
|
89 | 89 | 'hg': ( |
|
90 | 90 | '25d7e49c18b159446cadfa506a5cf8ad1cb04067', |
|
91 | 91 | '603d6c72c46d953420c89d36372f08d9f305f5dd'), |
|
92 | 92 | 'git': ( |
|
93 | 93 | '6fc9270775aaf5544c1deb014f4ddd60c952fcbb', |
|
94 | 94 | '03fa803d7e9fb14daa9a3089e0d1494eda75d986'), |
|
95 | 95 | 'svn': ( |
|
96 | 96 | '335', |
|
97 | 97 | '337'), |
|
98 | 98 | } |
|
99 | 99 | commit_ids = commit_id_range[backend.alias] |
|
100 | 100 | commit_id = '%s...%s' % (commit_ids[0], commit_ids[1]) |
|
101 | 101 | response = self.app.get(url( |
|
102 | 102 | controller='changeset', action='index', |
|
103 | 103 | repo_name=backend.repo_name, revision=commit_id)) |
|
104 | 104 | |
|
105 | 105 | response.mustcontain(_shorten_commit_id(commit_ids[0])) |
|
106 | 106 | response.mustcontain(_shorten_commit_id(commit_ids[1])) |
|
107 | ||
|
107 | ||
|
108 | 108 | # svn is special |
|
109 | 109 | if backend.alias == 'svn': |
|
110 | 110 | response.mustcontain('new file 10644') |
|
111 | 111 | response.mustcontain('34 files changed: 1184 inserted, 311 deleted') |
|
112 | 112 | else: |
|
113 | 113 | response.mustcontain('new file 100644') |
|
114 | 114 | response.mustcontain('33 files changed: 1165 inserted, 308 deleted') |
|
115 | 115 | |
|
116 | 116 | # file operations |
|
117 | 117 | response.mustcontain('File no longer present at commit: %s' % |
|
118 | 118 | _shorten_commit_id(commit_ids[1])) |
|
119 | 119 | response.mustcontain('Added docstrings to vcs.cli') # commit msg |
|
120 | 120 | response.mustcontain('Changed theme to ADC theme') # commit msg |
|
121 | 121 | |
|
122 | 122 | self._check_diff_menus(response) |
|
123 | 123 | |
|
124 | 124 | def test_combined_compare_commit_page_different_ops(self, backend): |
|
125 | 125 | commit_id_range = { |
|
126 | 126 | 'hg': ( |
|
127 | 127 | '4fdd71e9427417b2e904e0464c634fdee85ec5a7', |
|
128 | 128 | '603d6c72c46d953420c89d36372f08d9f305f5dd'), |
|
129 | 129 | 'git': ( |
|
130 | 130 | 'f5fbf9cfd5f1f1be146f6d3b38bcd791a7480c13', |
|
131 | 131 | '03fa803d7e9fb14daa9a3089e0d1494eda75d986'), |
|
132 | 132 | 'svn': ( |
|
133 | 133 | '335', |
|
134 | 134 | '337'), |
|
135 | 135 | } |
|
136 | 136 | commit_ids = commit_id_range[backend.alias] |
|
137 | 137 | response = self.app.get(url( |
|
138 | 138 | controller='compare', action='compare', |
|
139 | 139 | repo_name=backend.repo_name, |
|
140 | 140 | source_ref_type='rev', source_ref=commit_ids[0], |
|
141 | 141 | target_ref_type='rev', target_ref=commit_ids[1], )) |
|
142 | 142 | |
|
143 | 143 | response.mustcontain(_shorten_commit_id(commit_ids[0])) |
|
144 | 144 | response.mustcontain(_shorten_commit_id(commit_ids[1])) |
|
145 | 145 | |
|
146 | 146 | # file operations |
|
147 | 147 | response.mustcontain('File no longer present at commit: %s' % |
|
148 | 148 | _shorten_commit_id(commit_ids[1])) |
|
149 | ||
|
149 | ||
|
150 | 150 | # svn is special |
|
151 | 151 | if backend.alias == 'svn': |
|
152 | 152 | response.mustcontain('new file 10644') |
|
153 | 153 | response.mustcontain('32 files changed: 1179 inserted, 310 deleted') |
|
154 | 154 | else: |
|
155 | 155 | response.mustcontain('new file 100644') |
|
156 | 156 | response.mustcontain('32 files changed: 1165 inserted, 308 deleted') |
|
157 | 157 | |
|
158 | 158 | response.mustcontain('Added docstrings to vcs.cli') # commit msg |
|
159 | 159 | response.mustcontain('Changed theme to ADC theme') # commit msg |
|
160 | 160 | |
|
161 | self._check_diff_menus(response) | |
|
161 | self._check_new_diff_menus(response) | |
|
162 | 162 | |
|
163 | 163 | def test_changeset_range(self, backend): |
|
164 | 164 | self._check_changeset_range( |
|
165 | 165 | backend, self.commit_id_range, self.commit_id_range_result) |
|
166 | 166 | |
|
167 | 167 | def test_changeset_range_with_initial_commit(self, backend): |
|
168 | 168 | commit_id_range = { |
|
169 | 169 | 'hg': ( |
|
170 | 170 | 'b986218ba1c9b0d6a259fac9b050b1724ed8e545' |
|
171 | 171 | '...6cba7170863a2411822803fa77a0a264f1310b35'), |
|
172 | 172 | 'git': ( |
|
173 | 173 | 'c1214f7e79e02fc37156ff215cd71275450cffc3' |
|
174 | 174 | '...fa6600f6848800641328adbf7811fd2372c02ab2'), |
|
175 | 175 | 'svn': '1...3', |
|
176 | 176 | } |
|
177 | 177 | commit_id_range_result = { |
|
178 | 178 | 'hg': ['b986218ba1c9', '3d8f361e72ab', '6cba7170863a'], |
|
179 | 179 | 'git': ['c1214f7e79e0', '38b5fe81f109', 'fa6600f68488'], |
|
180 | 180 | 'svn': ['1', '2', '3'], |
|
181 | 181 | } |
|
182 | 182 | self._check_changeset_range( |
|
183 | 183 | backend, commit_id_range, commit_id_range_result) |
|
184 | 184 | |
|
185 | 185 | def _check_changeset_range( |
|
186 | 186 | self, backend, commit_id_ranges, commit_id_range_result): |
|
187 | 187 | response = self.app.get( |
|
188 | 188 | url(controller='changeset', action='index', |
|
189 | 189 | repo_name=backend.repo_name, |
|
190 | 190 | revision=commit_id_ranges[backend.alias])) |
|
191 | 191 | expected_result = commit_id_range_result[backend.alias] |
|
192 | 192 | response.mustcontain('{} commits'.format(len(expected_result))) |
|
193 | 193 | for commit_id in expected_result: |
|
194 | 194 | response.mustcontain(commit_id) |
|
195 | 195 | |
|
196 | 196 | commit_id = { |
|
197 | 197 | 'hg': '2062ec7beeeaf9f44a1c25c41479565040b930b2', |
|
198 | 198 | 'svn': '393', |
|
199 | 199 | 'git': 'fd627b9e0dd80b47be81af07c4a98518244ed2f7', |
|
200 | 200 | } |
|
201 | 201 | |
|
202 | 202 | commit_id_range = { |
|
203 | 203 | 'hg': ( |
|
204 | 204 | 'a53d9201d4bc278910d416d94941b7ea007ecd52' |
|
205 | 205 | '...2062ec7beeeaf9f44a1c25c41479565040b930b2'), |
|
206 | 206 | 'git': ( |
|
207 | 207 | '7ab37bc680b4aa72c34d07b230c866c28e9fc204' |
|
208 | 208 | '...fd627b9e0dd80b47be81af07c4a98518244ed2f7'), |
|
209 | 209 | 'svn': '391...393', |
|
210 | 210 | } |
|
211 | 211 | |
|
212 | 212 | commit_id_range_result = { |
|
213 | 213 | 'hg': ['a53d9201d4bc', '96507bd11ecc', '2062ec7beeea'], |
|
214 | 214 | 'git': ['7ab37bc680b4', '5f2c6ee19592', 'fd627b9e0dd8'], |
|
215 | 215 | 'svn': ['391', '392', '393'], |
|
216 | 216 | } |
|
217 | 217 | |
|
218 | 218 | diffs = { |
|
219 | 219 | 'hg': r"""diff --git a/README b/README |
|
220 | 220 | new file mode 120000 |
|
221 | 221 | --- /dev/null |
|
222 | 222 | +++ b/README |
|
223 | 223 | @@ -0,0 +1,1 @@ |
|
224 | 224 | +README.rst |
|
225 | 225 | \ No newline at end of file |
|
226 | 226 | """, |
|
227 | 227 | 'git': r"""diff --git a/README b/README |
|
228 | 228 | new file mode 120000 |
|
229 | 229 | index 0000000000000000000000000000000000000000..92cacd285355271487b7e379dba6ca60f9a554a4 |
|
230 | 230 | --- /dev/null |
|
231 | 231 | +++ b/README |
|
232 | 232 | @@ -0,0 +1 @@ |
|
233 | 233 | +README.rst |
|
234 | 234 | \ No newline at end of file |
|
235 | 235 | """, |
|
236 | 236 | 'svn': """Index: README |
|
237 | 237 | =================================================================== |
|
238 | 238 | diff --git a/README b/README |
|
239 | 239 | new file mode 10644 |
|
240 | 240 | --- /dev/null\t(revision 0) |
|
241 | 241 | +++ b/README\t(revision 393) |
|
242 | 242 | @@ -0,0 +1 @@ |
|
243 | 243 | +link README.rst |
|
244 | 244 | \\ No newline at end of file |
|
245 | 245 | """, |
|
246 | 246 | } |
|
247 | 247 | |
|
248 | 248 | patches = { |
|
249 | 249 | 'hg': r"""# HG changeset patch |
|
250 | 250 | # User Marcin Kuzminski <marcin@python-works.com> |
|
251 | 251 | # Date 2014-01-07 12:21:40 |
|
252 | 252 | # Node ID 2062ec7beeeaf9f44a1c25c41479565040b930b2 |
|
253 | 253 | # Parent 96507bd11ecc815ebc6270fdf6db110928c09c1e |
|
254 | 254 | |
|
255 | 255 | Added a symlink |
|
256 | 256 | |
|
257 | 257 | """ + diffs['hg'], |
|
258 | 258 | 'git': r"""From fd627b9e0dd80b47be81af07c4a98518244ed2f7 2014-01-07 12:22:20 |
|
259 | 259 | From: Marcin Kuzminski <marcin@python-works.com> |
|
260 | 260 | Date: 2014-01-07 12:22:20 |
|
261 | 261 | Subject: [PATCH] Added a symlink |
|
262 | 262 | |
|
263 | 263 | --- |
|
264 | 264 | |
|
265 | 265 | """ + diffs['git'], |
|
266 | 266 | 'svn': r"""# SVN changeset patch |
|
267 | 267 | # User marcin |
|
268 | 268 | # Date 2014-09-02 12:25:22.071142 |
|
269 | 269 | # Revision 393 |
|
270 | 270 | |
|
271 | 271 | Added a symlink |
|
272 | 272 | |
|
273 | 273 | """ + diffs['svn'], |
|
274 | 274 | } |
|
275 | 275 | |
|
276 | def _check_diff_menus(self, response, right_menu=False): | |
|
276 | def _check_diff_menus(self, response, right_menu=False,): | |
|
277 | 277 | # diff menus |
|
278 | 278 | for elem in ['Show File', 'Unified Diff', 'Side-by-side Diff', |
|
279 | 279 | 'Raw Diff', 'Download Diff']: |
|
280 | 280 | response.mustcontain(elem) |
|
281 | 281 | |
|
282 | 282 | # right pane diff menus |
|
283 | 283 | if right_menu: |
|
284 | 284 | for elem in ['Ignore whitespace', 'Increase context', |
|
285 | 285 | 'Hide comments']: |
|
286 | 286 | response.mustcontain(elem) |
|
287 | ||
|
288 | ||
|
289 | def _check_new_diff_menus(self, response, right_menu=False,): | |
|
290 | # diff menus | |
|
291 | for elem in ['Show file before', 'Show file after', | |
|
292 | 'Raw diff', 'Download diff']: | |
|
293 | response.mustcontain(elem) | |
|
294 | ||
|
295 | # right pane diff menus | |
|
296 | if right_menu: | |
|
297 | for elem in ['Ignore whitespace', 'Increase context', | |
|
298 | 'Hide comments']: | |
|
299 | response.mustcontain(elem) |
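
These tests are driven by the backend fixture, so expected values live in dicts keyed by backend.alias ('hg', 'git', 'svn'), and multi-commit views are requested with the '<start>...<end>' revision form. A minimal sketch of both conventions (ids copied from the fixtures above):

    COMMIT_IDS = {
        'hg': '2062ec7beeeaf9f44a1c25c41479565040b930b2',
        'git': 'fd627b9e0dd80b47be81af07c4a98518244ed2f7',
        'svn': '393',
    }

    def revision_range(start, end):
        # the "<start>...<end>" form used by commit_id_range above
        return '%s...%s' % (start, end)

    assert revision_range('391', COMMIT_IDS['svn']) == '391...393'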
@@ -1,691 +1,695 @@
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2010-2016 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | import mock |
|
22 | 22 | import pytest |
|
23 | import lxml.html | |
|
23 | 24 | |
|
24 | 25 | from rhodecode.lib.vcs.backends.base import EmptyCommit |
|
25 | 26 | from rhodecode.lib.vcs.exceptions import RepositoryRequirementError |
|
26 | 27 | from rhodecode.model.db import Repository |
|
27 | 28 | from rhodecode.model.scm import ScmModel |
|
28 | 29 | from rhodecode.tests import url, TEST_USER_ADMIN_LOGIN, assert_session_flash |
|
29 | 30 | from rhodecode.tests.utils import AssertResponse |
|
30 | 31 | |
|
31 | 32 | |
|
32 | 33 | @pytest.mark.usefixtures("autologin_user", "app") |
|
33 | 34 | class TestCompareController: |
|
34 | 35 | |
|
35 | 36 | @pytest.mark.xfail_backends("svn", reason="Requires pull") |
|
36 | 37 | def test_compare_remote_with_different_commit_indexes(self, backend): |
|
37 | 38 | # Preparing the following repository structure: |
|
38 | 39 | # |
|
39 | 40 | # Origin repository has two commits: |
|
40 | 41 | # |
|
41 | 42 | # 0 1 |
|
42 | 43 | # A -- D |
|
43 | 44 | # |
|
44 | 45 | # The fork of it has a few more commits and "D" has a commit index |
|
45 | 46 | # which does not exist in origin. |
|
46 | 47 | # |
|
47 | 48 | # 0 1 2 3 4 |
|
48 | 49 | # A -- -- -- D -- E |
|
49 | 50 | # \- B -- C |
|
50 | 51 | # |
|
51 | 52 | |
|
52 | 53 | fork = backend.create_repo() |
|
53 | 54 | |
|
54 | 55 | # prepare fork |
|
55 | 56 | commit0 = _commit_change( |
|
56 | 57 | fork.repo_name, filename='file1', content='A', |
|
57 | 58 | message='A', vcs_type=backend.alias, parent=None, newfile=True) |
|
58 | 59 | |
|
59 | 60 | commit1 = _commit_change( |
|
60 | 61 | fork.repo_name, filename='file1', content='B', |
|
61 | 62 | message='B, child of A', vcs_type=backend.alias, parent=commit0) |
|
62 | 63 | |
|
63 | 64 | _commit_change( # commit 2 |
|
64 | 65 | fork.repo_name, filename='file1', content='C', |
|
65 | 66 | message='C, child of B', vcs_type=backend.alias, parent=commit1) |
|
66 | 67 | |
|
67 | 68 | commit3 = _commit_change( |
|
68 | 69 | fork.repo_name, filename='file1', content='D', |
|
69 | 70 | message='D, child of A', vcs_type=backend.alias, parent=commit0) |
|
70 | 71 | |
|
71 | 72 | commit4 = _commit_change( |
|
72 | 73 | fork.repo_name, filename='file1', content='E', |
|
73 | 74 | message='E, child of D', vcs_type=backend.alias, parent=commit3) |
|
74 | 75 | |
|
75 | 76 | # prepare origin repository, taking just the history up to D |
|
76 | 77 | origin = backend.create_repo() |
|
77 | 78 | |
|
78 | 79 | origin_repo = origin.scm_instance(cache=False) |
|
79 | 80 | origin_repo.config.clear_section('hooks') |
|
80 | 81 | origin_repo.pull(fork.repo_full_path, commit_ids=[commit3.raw_id]) |
|
81 | 82 | |
|
82 | 83 | # Verify test fixture setup |
|
83 | 84 | # This does not work for git |
|
84 | 85 | if backend.alias != 'git': |
|
85 | 86 | assert 5 == len(fork.scm_instance().commit_ids) |
|
86 | 87 | assert 2 == len(origin_repo.commit_ids) |
|
87 | 88 | |
|
88 | 89 | # Comparing the revisions |
|
89 | 90 | response = self.app.get( |
|
90 | 91 | url('compare_url', |
|
91 | 92 | repo_name=origin.repo_name, |
|
92 | 93 | source_ref_type="rev", |
|
93 | 94 | source_ref=commit3.raw_id, |
|
94 | 95 | target_repo=fork.repo_name, |
|
95 | 96 | target_ref_type="rev", |
|
96 | 97 | target_ref=commit4.raw_id, |
|
97 | 98 | merge='1',)) |
|
98 | 99 | |
|
99 | 100 | compare_page = ComparePage(response) |
|
100 | 101 | compare_page.contains_commits([commit4]) |
|
101 | 102 | |
|
102 | 103 | @pytest.mark.xfail_backends("svn", reason="Depends on branch support") |
|
103 | 104 | def test_compare_forks_on_branch_extra_commits(self, backend): |
|
104 | 105 | repo1 = backend.create_repo() |
|
105 | 106 | |
|
106 | 107 | # commit something ! |
|
107 | 108 | commit0 = _commit_change( |
|
108 | 109 | repo1.repo_name, filename='file1', content='line1\n', |
|
109 | 110 | message='commit1', vcs_type=backend.alias, parent=None, |
|
110 | 111 | newfile=True) |
|
111 | 112 | |
|
112 | 113 | # fork this repo |
|
113 | 114 | repo2 = backend.create_fork() |
|
114 | 115 | |
|
115 | 116 | # add two extra commit into fork |
|
116 | 117 | commit1 = _commit_change( |
|
117 | 118 | repo2.repo_name, filename='file1', content='line1\nline2\n', |
|
118 | 119 | message='commit2', vcs_type=backend.alias, parent=commit0) |
|
119 | 120 | |
|
120 | 121 | commit2 = _commit_change( |
|
121 | 122 | repo2.repo_name, filename='file1', content='line1\nline2\nline3\n', |
|
122 | 123 | message='commit3', vcs_type=backend.alias, parent=commit1) |
|
123 | 124 | |
|
124 | 125 | commit_id1 = repo1.scm_instance().DEFAULT_BRANCH_NAME |
|
125 | 126 | commit_id2 = repo2.scm_instance().DEFAULT_BRANCH_NAME |
|
126 | 127 | |
|
127 | 128 | response = self.app.get( |
|
128 | 129 | url('compare_url', |
|
129 | 130 | repo_name=repo1.repo_name, |
|
130 | 131 | source_ref_type="branch", |
|
131 | 132 | source_ref=commit_id2, |
|
132 | 133 | target_repo=repo2.repo_name, |
|
133 | 134 | target_ref_type="branch", |
|
134 | 135 | target_ref=commit_id1, |
|
135 | 136 | merge='1',)) |
|
136 | 137 | |
|
137 | 138 | response.mustcontain('%s@%s' % (repo1.repo_name, commit_id2)) |
|
138 | 139 | response.mustcontain('%s@%s' % (repo2.repo_name, commit_id1)) |
|
139 | 140 | |
|
140 | 141 | compare_page = ComparePage(response) |
|
141 | 142 | compare_page.contains_change_summary(1, 2, 0) |
|
142 | 143 | compare_page.contains_commits([commit1, commit2]) |
|
143 | 144 | compare_page.contains_file_links_and_anchors([ |
|
144 | 145 | ('file1', 'a_c--826e8142e6ba'), |
|
145 | 146 | ]) |
|
146 | 147 | |
|
147 | 148 | # Swap is removed when comparing branches since it's a PR feature and |
|
148 | 149 | # it is then a preview mode |
|
149 | 150 | compare_page.swap_is_hidden() |
|
150 | 151 | compare_page.target_source_are_disabled() |
|
151 | 152 | |
|
152 | 153 | @pytest.mark.xfail_backends("svn", reason="Depends on branch support") |
|
153 | 154 | def test_compare_forks_on_branch_extra_commits_origin_has_incomming( |
|
154 | 155 | self, backend): |
|
155 | 156 | repo1 = backend.create_repo() |
|
156 | 157 | |
|
157 | 158 | # commit something ! |
|
158 | 159 | commit0 = _commit_change( |
|
159 | 160 | repo1.repo_name, filename='file1', content='line1\n', |
|
160 | 161 | message='commit1', vcs_type=backend.alias, parent=None, |
|
161 | 162 | newfile=True) |
|
162 | 163 | |
|
163 | 164 | # fork this repo |
|
164 | 165 | repo2 = backend.create_fork() |
|
165 | 166 | |
|
166 | 167 | # now commit something to origin repo |
|
167 | 168 | _commit_change( |
|
168 | 169 | repo1.repo_name, filename='file2', content='line1file2\n', |
|
169 | 170 | message='commit2', vcs_type=backend.alias, parent=commit0, |
|
170 | 171 | newfile=True) |
|
171 | 172 | |
|
172 | 173 | # add two extra commit into fork |
|
173 | 174 | commit1 = _commit_change( |
|
174 | 175 | repo2.repo_name, filename='file1', content='line1\nline2\n', |
|
175 | 176 | message='commit2', vcs_type=backend.alias, parent=commit0) |
|
176 | 177 | |
|
177 | 178 | commit2 = _commit_change( |
|
178 | 179 | repo2.repo_name, filename='file1', content='line1\nline2\nline3\n', |
|
179 | 180 | message='commit3', vcs_type=backend.alias, parent=commit1) |
|
180 | 181 | |
|
181 | 182 | commit_id1 = repo1.scm_instance().DEFAULT_BRANCH_NAME |
|
182 | 183 | commit_id2 = repo2.scm_instance().DEFAULT_BRANCH_NAME |
|
183 | 184 | |
|
184 | 185 | response = self.app.get( |
|
185 | 186 | url('compare_url', |
|
186 | 187 | repo_name=repo1.repo_name, |
|
187 | 188 | source_ref_type="branch", |
|
188 | 189 | source_ref=commit_id2, |
|
189 | 190 | target_repo=repo2.repo_name, |
|
190 | 191 | target_ref_type="branch", |
|
191 | 192 | target_ref=commit_id1, |
|
192 | 193 | merge='1')) |
|
193 | 194 | |
|
194 | 195 | response.mustcontain('%s@%s' % (repo1.repo_name, commit_id2)) |
|
195 | 196 | response.mustcontain('%s@%s' % (repo2.repo_name, commit_id1)) |
|
196 | 197 | |
|
197 | 198 | compare_page = ComparePage(response) |
|
198 | 199 | compare_page.contains_change_summary(1, 2, 0) |
|
199 | 200 | compare_page.contains_commits([commit1, commit2]) |
|
200 | 201 | compare_page.contains_file_links_and_anchors([ |
|
201 | 202 | ('file1', 'a_c--826e8142e6ba'), |
|
202 | 203 | ]) |
|
203 | 204 | |
|
204 | 205 | # Swap is removed when comparing branches since it's a PR feature and |
|
205 | 206 | # it is then a preview mode |
|
206 | 207 | compare_page.swap_is_hidden() |
|
207 | 208 | compare_page.target_source_are_disabled() |
|
208 | 209 | |
|
209 | 210 | @pytest.mark.xfail_backends("svn", "git") |
|
210 | 211 | def test_compare_of_unrelated_forks(self, backend): |
|
211 | 212 | # TODO: johbo: Fails for git due to some other issue it seems |
|
212 | 213 | orig = backend.create_repo(number_of_commits=1) |
|
213 | 214 | fork = backend.create_repo(number_of_commits=1) |
|
214 | 215 | |
|
215 | 216 | response = self.app.get( |
|
216 | 217 | url('compare_url', |
|
217 | 218 | repo_name=orig.repo_name, |
|
218 | 219 | action="compare", |
|
219 | 220 | source_ref_type="rev", |
|
220 | 221 | source_ref="tip", |
|
221 | 222 | target_ref_type="rev", |
|
222 | 223 | target_ref="tip", |
|
223 | 224 | merge='1', |
|
224 | 225 | target_repo=fork.repo_name), |
|
225 | 226 | status=400) |
|
226 | 227 | |
|
227 | 228 | response.mustcontain("Repositories unrelated.") |
|
228 | 229 | |
|
229 | 230 | @pytest.mark.xfail_backends("svn") |
|
230 | 231 | def test_compare_cherry_pick_commits_from_bottom(self, backend): |
|
231 | 232 | |
|
232 | 233 | # repo1: |
|
233 | 234 | # commit0: |
|
234 | 235 | # commit1: |
|
235 | 236 | # repo1-fork- in which we will cherry pick bottom commits |
|
236 | 237 | # commit0: |
|
237 | 238 | # commit1: |
|
238 | 239 | # commit2: x |
|
239 | 240 | # commit3: x |
|
240 | 241 | # commit4: x |
|
241 | 242 | # commit5: |
|
242 | 243 | # make repo1, and commit1+commit2 |
|
243 | 244 | |
|
244 | 245 | repo1 = backend.create_repo() |
|
245 | 246 | |
|
246 | 247 | # commit something ! |
|
247 | 248 | commit0 = _commit_change( |
|
248 | 249 | repo1.repo_name, filename='file1', content='line1\n', |
|
249 | 250 | message='commit1', vcs_type=backend.alias, parent=None, |
|
250 | 251 | newfile=True) |
|
251 | 252 | commit1 = _commit_change( |
|
252 | 253 | repo1.repo_name, filename='file1', content='line1\nline2\n', |
|
253 | 254 | message='commit2', vcs_type=backend.alias, parent=commit0) |
|
254 | 255 | |
|
255 | 256 | # fork this repo |
|
256 | 257 | repo2 = backend.create_fork() |
|
257 | 258 | |
|
258 | 259 | # now make commit3-6 |
|
259 | 260 | commit2 = _commit_change( |
|
260 | 261 | repo1.repo_name, filename='file1', content='line1\nline2\nline3\n', |
|
261 | 262 | message='commit3', vcs_type=backend.alias, parent=commit1) |
|
262 | 263 | commit3 = _commit_change( |
|
263 | 264 | repo1.repo_name, filename='file1', |
|
264 | 265 | content='line1\nline2\nline3\nline4\n', message='commit4', |
|
265 | 266 | vcs_type=backend.alias, parent=commit2) |
|
266 | 267 | commit4 = _commit_change( |
|
267 | 268 | repo1.repo_name, filename='file1', |
|
268 | 269 | content='line1\nline2\nline3\nline4\nline5\n', message='commit5', |
|
269 | 270 | vcs_type=backend.alias, parent=commit3) |
|
270 | 271 | _commit_change( # commit 5 |
|
271 | 272 | repo1.repo_name, filename='file1', |
|
272 | 273 | content='line1\nline2\nline3\nline4\nline5\nline6\n', |
|
273 | 274 | message='commit6', vcs_type=backend.alias, parent=commit4) |
|
274 | 275 | |
|
275 | 276 | response = self.app.get( |
|
276 | 277 | url('compare_url', |
|
277 | 278 | repo_name=repo2.repo_name, |
|
278 | 279 | source_ref_type="rev", |
|
279 | 280 | # parent of commit2, in target repo2 |
|
280 | 281 | source_ref=commit1.raw_id, |
|
281 | 282 | target_repo=repo1.repo_name, |
|
282 | 283 | target_ref_type="rev", |
|
283 | 284 | target_ref=commit4.raw_id, |
|
284 | 285 | merge='1',)) |
|
285 | 286 | response.mustcontain('%s@%s' % (repo2.repo_name, commit1.short_id)) |
|
286 | 287 | response.mustcontain('%s@%s' % (repo1.repo_name, commit4.short_id)) |
|
287 | 288 | |
|
288 | 289 | # files |
|
289 | 290 | compare_page = ComparePage(response) |
|
290 | 291 | compare_page.contains_change_summary(1, 3, 0) |
|
291 | 292 | compare_page.contains_commits([commit2, commit3, commit4]) |
|
292 | 293 | compare_page.contains_file_links_and_anchors([ |
|
293 | 294 | ('file1', 'a_c--826e8142e6ba'), |
|
294 | 295 | ]) |
|
295 | 296 | |
|
296 | 297 | @pytest.mark.xfail_backends("svn") |
|
297 | 298 | def test_compare_cherry_pick_commits_from_top(self, backend): |
|
298 | 299 | # repo1: |
|
299 | 300 | # commit0: |
|
300 | 301 | # commit1: |
|
301 | 302 | # repo1-fork- in which we will cherry pick bottom commits |
|
302 | 303 | # commit0: |
|
303 | 304 | # commit1: |
|
304 | 305 | # commit2: |
|
305 | 306 | # commit3: x |
|
306 | 307 | # commit4: x |
|
307 | 308 | # commit5: x |
|
308 | 309 | |
|
309 | 310 | # make repo1, and commit1+commit2 |
|
310 | 311 | repo1 = backend.create_repo() |
|
311 | 312 | |
|
312 | 313 | # commit something ! |
|
313 | 314 | commit0 = _commit_change( |
|
314 | 315 | repo1.repo_name, filename='file1', content='line1\n', |
|
315 | 316 | message='commit1', vcs_type=backend.alias, parent=None, |
|
316 | 317 | newfile=True) |
|
317 | 318 | commit1 = _commit_change( |
|
318 | 319 | repo1.repo_name, filename='file1', content='line1\nline2\n', |
|
319 | 320 | message='commit2', vcs_type=backend.alias, parent=commit0) |
|
320 | 321 | |
|
321 | 322 | # fork this repo |
|
322 | 323 | backend.create_fork() |
|
323 | 324 | |
|
324 | 325 | # now make commit3-6 |
|
325 | 326 | commit2 = _commit_change( |
|
326 | 327 | repo1.repo_name, filename='file1', content='line1\nline2\nline3\n', |
|
327 | 328 | message='commit3', vcs_type=backend.alias, parent=commit1) |
|
328 | 329 | commit3 = _commit_change( |
|
329 | 330 | repo1.repo_name, filename='file1', |
|
330 | 331 | content='line1\nline2\nline3\nline4\n', message='commit4', |
|
331 | 332 | vcs_type=backend.alias, parent=commit2) |
|
332 | 333 | commit4 = _commit_change( |
|
333 | 334 | repo1.repo_name, filename='file1', |
|
334 | 335 | content='line1\nline2\nline3\nline4\nline5\n', message='commit5', |
|
335 | 336 | vcs_type=backend.alias, parent=commit3) |
|
336 | 337 | commit5 = _commit_change( |
|
337 | 338 | repo1.repo_name, filename='file1', |
|
338 | 339 | content='line1\nline2\nline3\nline4\nline5\nline6\n', |
|
339 | 340 | message='commit6', vcs_type=backend.alias, parent=commit4) |
|
340 | 341 | |
|
341 | 342 | response = self.app.get( |
|
342 | 343 | url('compare_url', |
|
343 | 344 | repo_name=repo1.repo_name, |
|
344 | 345 | source_ref_type="rev", |
|
345 | 346 | # parent of commit3, not in source repo2 |
|
346 | 347 | source_ref=commit2.raw_id, |
|
347 | 348 | target_ref_type="rev", |
|
348 | 349 | target_ref=commit5.raw_id, |
|
349 | 350 | merge='1',)) |
|
350 | 351 | |
|
351 | 352 | response.mustcontain('%s@%s' % (repo1.repo_name, commit2.short_id)) |
|
352 | 353 | response.mustcontain('%s@%s' % (repo1.repo_name, commit5.short_id)) |
|
353 | 354 | |
|
354 | 355 | compare_page = ComparePage(response) |
|
355 | 356 | compare_page.contains_change_summary(1, 3, 0) |
|
356 | 357 | compare_page.contains_commits([commit3, commit4, commit5]) |
|
357 | 358 | |
|
358 | 359 | # files |
|
359 | 360 | compare_page.contains_file_links_and_anchors([ |
|
360 | 361 | ('file1', 'a_c--826e8142e6ba'), |
|
361 | 362 | ]) |
|
362 | 363 | |
|
363 | 364 | @pytest.mark.xfail_backends("svn") |
|
364 | 365 | def test_compare_remote_branches(self, backend): |
|
365 | 366 | repo1 = backend.repo |
|
366 | 367 | repo2 = backend.create_fork() |
|
367 | 368 | |
|
368 | 369 | commit_id1 = repo1.get_commit(commit_idx=3).raw_id |
|
369 | 370 | commit_id2 = repo1.get_commit(commit_idx=6).raw_id |
|
370 | 371 | |
|
371 | 372 | response = self.app.get( |
|
372 | 373 | url('compare_url', |
|
373 | 374 | repo_name=repo1.repo_name, |
|
374 | 375 | source_ref_type="rev", |
|
375 | 376 | source_ref=commit_id1, |
|
376 | 377 | target_ref_type="rev", |
|
377 | 378 | target_ref=commit_id2, |
|
378 | 379 | target_repo=repo2.repo_name, |
|
379 | 380 | merge='1',)) |
|
380 | 381 | |
|
381 | 382 | response.mustcontain('%s@%s' % (repo1.repo_name, commit_id1)) |
|
382 | 383 | response.mustcontain('%s@%s' % (repo2.repo_name, commit_id2)) |
|
383 | 384 | |
|
384 | 385 | compare_page = ComparePage(response) |
|
385 | 386 | |
|
386 | 387 | # outgoing commits between those commits |
|
387 | 388 | compare_page.contains_commits( |
|
388 | 389 | [repo2.get_commit(commit_idx=x) for x in [4, 5, 6]]) |
|
389 | 390 | |
|
390 | 391 | # files |
|
391 | 392 | compare_page.contains_file_links_and_anchors([ |
|
392 | 393 | ('vcs/backends/hg.py', 'a_c--9c390eb52cd6'), |
|
393 | 394 | ('vcs/backends/__init__.py', 'a_c--41b41c1f2796'), |
|
394 | 395 | ('vcs/backends/base.py', 'a_c--2f574d260608'), |
|
395 | 396 | ]) |
|
396 | 397 | |
|
397 | 398 | @pytest.mark.xfail_backends("svn") |
|
398 | 399 | def test_source_repo_new_commits_after_forking_simple_diff(self, backend): |
|
399 | 400 | repo1 = backend.create_repo() |
|
400 | 401 | r1_name = repo1.repo_name |
|
401 | 402 | |
|
402 | 403 | commit0 = _commit_change( |
|
403 | 404 | repo=r1_name, filename='file1', |
|
404 | 405 | content='line1', message='commit1', vcs_type=backend.alias, |
|
405 | 406 | newfile=True) |
|
406 | 407 | assert repo1.scm_instance().commit_ids == [commit0.raw_id] |
|
407 | 408 | |
|
408 | 409 | # fork the repo1 |
|
409 | 410 | repo2 = backend.create_fork() |
|
410 | 411 | assert repo2.scm_instance().commit_ids == [commit0.raw_id] |
|
411 | 412 | |
|
412 | 413 | self.r2_id = repo2.repo_id |
|
413 | 414 | r2_name = repo2.repo_name |
|
414 | 415 | |
|
415 | 416 | commit1 = _commit_change( |
|
416 | 417 | repo=r2_name, filename='file1-fork', |
|
417 | 418 | content='file1-line1-from-fork', message='commit1-fork', |
|
418 | 419 | vcs_type=backend.alias, parent=repo2.scm_instance()[-1], |
|
419 | 420 | newfile=True) |
|
420 | 421 | |
|
421 | 422 | commit2 = _commit_change( |
|
422 | 423 | repo=r2_name, filename='file2-fork', |
|
423 | 424 | content='file2-line1-from-fork', message='commit2-fork', |
|
424 | 425 | vcs_type=backend.alias, parent=commit1, |
|
425 | 426 | newfile=True) |
|
426 | 427 | |
|
427 | 428 | _commit_change( # commit 3 |
|
428 | 429 | repo=r2_name, filename='file3-fork', |
|
429 | 430 | content='file3-line1-from-fork', message='commit3-fork', |
|
430 | 431 | vcs_type=backend.alias, parent=commit2, newfile=True) |
|
431 | 432 | |
|
432 | 433 | # compare ! |
|
433 | 434 | commit_id1 = repo1.scm_instance().DEFAULT_BRANCH_NAME |
|
434 | 435 | commit_id2 = repo2.scm_instance().DEFAULT_BRANCH_NAME |
|
435 | 436 | |
|
436 | 437 | response = self.app.get( |
|
437 | 438 | url('compare_url', |
|
438 | 439 | repo_name=r2_name, |
|
439 | 440 | source_ref_type="branch", |
|
440 | 441 | source_ref=commit_id1, |
|
441 | 442 | target_ref_type="branch", |
|
442 | 443 | target_ref=commit_id2, |
|
443 | 444 | target_repo=r1_name, |
|
444 | 445 | merge='1',)) |
|
445 | 446 | |
|
446 | 447 | response.mustcontain('%s@%s' % (r2_name, commit_id1)) |
|
447 | 448 | response.mustcontain('%s@%s' % (r1_name, commit_id2)) |
|
448 | 449 | response.mustcontain('No files') |
|
449 | 450 | response.mustcontain('No Commits') |
|
450 | 451 | |
|
451 | 452 | commit0 = _commit_change( |
|
452 | 453 | repo=r1_name, filename='file2', |
|
453 | 454 | content='line1-added-after-fork', message='commit2-parent', |
|
454 | 455 | vcs_type=backend.alias, parent=None, newfile=True) |
|
455 | 456 | |
|
456 | 457 | # compare ! |
|
457 | 458 | response = self.app.get( |
|
458 | 459 | url('compare_url', |
|
459 | 460 | repo_name=r2_name, |
|
460 | 461 | source_ref_type="branch", |
|
461 | 462 | source_ref=commit_id1, |
|
462 | 463 | target_ref_type="branch", |
|
463 | 464 | target_ref=commit_id2, |
|
464 | 465 | target_repo=r1_name, |
|
465 | 466 | merge='1',)) |
|
466 | 467 | |
|
467 | 468 | response.mustcontain('%s@%s' % (r2_name, commit_id1)) |
|
468 | 469 | response.mustcontain('%s@%s' % (r1_name, commit_id2)) |
|
469 | 470 | |
|
470 | 471 | response.mustcontain("""commit2-parent""") |
|
471 | 472 | response.mustcontain("""line1-added-after-fork""") |
|
472 | 473 | compare_page = ComparePage(response) |
|
473 | 474 | compare_page.contains_change_summary(1, 1, 0) |
|
474 | 475 | |
|
475 | 476 | @pytest.mark.xfail_backends("svn") |
|
476 | 477 | def test_compare_commits(self, backend): |
|
477 | 478 | commit0 = backend.repo.get_commit(commit_idx=0) |
|
478 | 479 | commit1 = backend.repo.get_commit(commit_idx=1) |
|
479 | 480 | |
|
480 | 481 | response = self.app.get( |
|
481 | 482 | url('compare_url', |
|
482 | 483 | repo_name=backend.repo_name, |
|
483 | 484 | source_ref_type="rev", |
|
484 | 485 | source_ref=commit0.raw_id, |
|
485 | 486 | target_ref_type="rev", |
|
486 | 487 | target_ref=commit1.raw_id, |
|
487 | 488 | merge='1',), |
|
488 | 489 | extra_environ={'HTTP_X_PARTIAL_XHR': '1'},) |
|
489 | 490 | |
|
490 | 491 | # outgoing commits between those commits |
|
491 | 492 | compare_page = ComparePage(response) |
|
492 | 493 | compare_page.contains_commits(commits=[commit1], ancestors=[commit0]) |
|
493 | 494 | |
|
494 | 495 | def test_errors_when_comparing_unknown_repo(self, backend): |
|
495 | 496 | repo = backend.repo |
|
496 | 497 | badrepo = 'badrepo' |
|
497 | 498 | |
|
498 | 499 | response = self.app.get( |
|
499 | 500 | url('compare_url', |
|
500 | 501 | repo_name=repo.repo_name, |
|
501 | 502 | source_ref_type="rev", |
|
502 | 503 | source_ref='tip', |
|
503 | 504 | target_ref_type="rev", |
|
504 | 505 | target_ref='tip', |
|
505 | 506 | target_repo=badrepo, |
|
506 | 507 | merge='1',), |
|
507 | 508 | status=302) |
|
508 | 509 | redirected = response.follow() |
|
509 | 510 | redirected.mustcontain('Could not find the other repo: %s' % badrepo) |
|
510 | 511 | |
|
511 | 512 | def test_compare_not_in_preview_mode(self, backend_stub): |
|
512 | 513 | commit0 = backend_stub.repo.get_commit(commit_idx=0) |
|
513 | 514 | commit1 = backend_stub.repo.get_commit(commit_idx=1) |
|
514 | 515 | |
|
515 | 516 | response = self.app.get(url('compare_url', |
|
516 | 517 | repo_name=backend_stub.repo_name, |
|
517 | 518 | source_ref_type="rev", |
|
518 | 519 | source_ref=commit0.raw_id, |
|
519 | 520 | target_ref_type="rev", |
|
520 | 521 | target_ref=commit1.raw_id, |
|
521 | 522 | ),) |
|
522 | 523 | |
|
523 | 524 | # outgoing commits between those commits |
|
524 | 525 | compare_page = ComparePage(response) |
|
525 | 526 | compare_page.swap_is_visible() |
|
526 | 527 | compare_page.target_source_are_enabled() |
|
527 | 528 | |
|
528 | 529 | def test_compare_of_fork_with_largefiles(self, backend_hg, settings_util): |
|
529 | 530 | orig = backend_hg.create_repo(number_of_commits=1) |
|
530 | 531 | fork = backend_hg.create_fork() |
|
531 | 532 | |
|
532 | 533 | settings_util.create_repo_rhodecode_ui( |
|
533 | 534 | orig, 'extensions', value='', key='largefiles', active=False) |
|
534 | 535 | settings_util.create_repo_rhodecode_ui( |
|
535 | 536 | fork, 'extensions', value='', key='largefiles', active=True) |
|
536 | 537 | |
|
537 | 538 | compare_module = ('rhodecode.lib.vcs.backends.hg.repository.' |
|
538 | 539 | 'MercurialRepository.compare') |
|
539 | 540 | with mock.patch(compare_module) as compare_mock: |
|
540 | 541 | compare_mock.side_effect = RepositoryRequirementError() |
|
541 | 542 | |
|
542 | 543 | response = self.app.get( |
|
543 | 544 | url('compare_url', |
|
544 | 545 | repo_name=orig.repo_name, |
|
545 | 546 | action="compare", |
|
546 | 547 | source_ref_type="rev", |
|
547 | 548 | source_ref="tip", |
|
548 | 549 | target_ref_type="rev", |
|
549 | 550 | target_ref="tip", |
|
550 | 551 | merge='1', |
|
551 | 552 | target_repo=fork.repo_name), |
|
552 | 553 | status=302) |
|
553 | 554 | |
|
554 | 555 | assert_session_flash( |
|
555 | 556 | response, |
|
556 | 557 | 'Could not compare repos with different large file settings') |
|
557 | 558 | |
|
558 | 559 | |
|
559 | 560 | @pytest.mark.usefixtures("autologin_user") |
|
560 | 561 | class TestCompareControllerSvn: |
|
561 | 562 | |
|
562 | 563 | def test_supports_references_with_path(self, app, backend_svn): |
|
563 | 564 | repo = backend_svn['svn-simple-layout'] |
|
564 | 565 | commit_id = repo.get_commit(commit_idx=-1).raw_id |
|
565 | 566 | response = app.get( |
|
566 | 567 | url('compare_url', |
|
567 | 568 | repo_name=repo.repo_name, |
|
568 | 569 | source_ref_type="tag", |
|
569 | 570 | source_ref="%s@%s" % ('tags/v0.1', commit_id), |
|
570 | 571 | target_ref_type="tag", |
|
571 | 572 | target_ref="%s@%s" % ('tags/v0.2', commit_id), |
|
572 | 573 | merge='1',), |
|
573 | 574 | status=200) |
|
574 | 575 | |
|
575 | 576 | # Expecting no commits, since both paths are at the same revision |
|
576 | 577 | response.mustcontain('No Commits') |
|
577 | 578 | |
|
578 | 579 | # Should find only one file changed when comparing those two tags |
|
579 | 580 | response.mustcontain('example.py') |
|
580 | 581 | compare_page = ComparePage(response) |
|
581 | 582 | compare_page.contains_change_summary(1, 5, 1) |
|
582 | 583 | |
|
583 | 584 | def test_shows_commits_if_different_ids(self, app, backend_svn): |
|
584 | 585 | repo = backend_svn['svn-simple-layout'] |
|
585 | 586 | source_id = repo.get_commit(commit_idx=-6).raw_id |
|
586 | 587 | target_id = repo.get_commit(commit_idx=-1).raw_id |
|
587 | 588 | response = app.get( |
|
588 | 589 | url('compare_url', |
|
589 | 590 | repo_name=repo.repo_name, |
|
590 | 591 | source_ref_type="tag", |
|
591 | 592 | source_ref="%s@%s" % ('tags/v0.1', source_id), |
|
592 | 593 | target_ref_type="tag", |
|
593 | 594 | target_ref="%s@%s" % ('tags/v0.2', target_id), |
|
594 | 595 | merge='1',), |
|
595 | 596 | status=200) |
|
596 | 597 | |
|
597 | 598 | # It should show commits |
|
598 | 599 | assert 'No Commits' not in response.body |
|
599 | 600 | |
|
600 | 601 | # Should find only one file changed when comparing those two tags |
|
601 | 602 | response.mustcontain('example.py') |
|
602 | 603 | compare_page = ComparePage(response) |
|
603 | 604 | compare_page.contains_change_summary(1, 5, 1) |
|
604 | 605 | |
|
605 | 606 | |
|
606 | 607 | class ComparePage(AssertResponse): |
|
607 | 608 | """ |
|
608 | 609 | Abstracts the page template from the tests |
|
609 | 610 | """ |
|
610 | 611 | |
|
611 | 612 | def contains_file_links_and_anchors(self, files): |
|
613 | doc = lxml.html.fromstring(self.response.body) | |
|
612 | 614 | for filename, file_id in files: |
|
613 | self.contains_one_link(filename, '#' + file_id) | |
|
614 | 615 | self.contains_one_anchor(file_id) |
|
616 | diffblock = doc.cssselect('[data-f-path="%s"]' % filename) | |
|
617 | assert len(diffblock) == 1 | |
|
618 | assert len(diffblock[0].cssselect('a[href="#%s"]' % file_id)) == 1 | |
|
615 | 619 | |
|
616 | 620 | def contains_change_summary(self, files_changed, inserted, deleted): |
|
617 | 621 | template = ( |
|
618 | 622 | "{files_changed} file{plural} changed: " |
|
619 | 623 | "{inserted} inserted, {deleted} deleted") |
|
620 | 624 | self.response.mustcontain(template.format( |
|
621 | 625 | files_changed=files_changed, |
|
622 | 626 | plural="s" if files_changed > 1 else "", |
|
623 | 627 | inserted=inserted, |
|
624 | 628 | deleted=deleted)) |
|
625 | 629 | |
|
626 | 630 | def contains_commits(self, commits, ancestors=None): |
|
627 | 631 | response = self.response |
|
628 | 632 | |
|
629 | 633 | for commit in commits: |
|
630 | 634 | # Expecting to see the commit message in an element which |
|
631 | 635 | # has the ID "c-{commit.raw_id}" |
|
632 | 636 | self.element_contains('#c-' + commit.raw_id, commit.message) |
|
633 | 637 | self.contains_one_link( |
|
634 | 638 | 'r%s:%s' % (commit.idx, commit.short_id), |
|
635 | 639 | self._commit_url(commit)) |
|
636 | 640 | if ancestors: |
|
637 | 641 | response.mustcontain('Ancestor') |
|
638 | 642 | for ancestor in ancestors: |
|
639 | 643 | self.contains_one_link( |
|
640 | 644 | ancestor.short_id, self._commit_url(ancestor)) |
|
641 | 645 | |
|
642 | 646 | def _commit_url(self, commit): |
|
643 | 647 | return '/%s/changeset/%s' % (commit.repository.name, commit.raw_id) |
|
644 | 648 | |
|
645 | 649 | def swap_is_hidden(self): |
|
646 | 650 | assert '<a id="btn-swap"' not in self.response.text |
|
647 | 651 | |
|
648 | 652 | def swap_is_visible(self): |
|
649 | 653 | assert '<a id="btn-swap"' in self.response.text |
|
650 | 654 | |
|
651 | 655 | def target_source_are_disabled(self): |
|
652 | 656 | response = self.response |
|
653 | 657 | response.mustcontain("var enable_fields = false;") |
|
654 | 658 | response.mustcontain('.select2("enable", enable_fields)') |
|
655 | 659 | |
|
656 | 660 | def target_source_are_enabled(self): |
|
657 | 661 | response = self.response |
|
658 | 662 | response.mustcontain("var enable_fields = true;") |
|
659 | 663 | |
|
660 | 664 | |
|
661 | 665 | def _commit_change( |
|
662 | 666 | repo, filename, content, message, vcs_type, parent=None, |
|
663 | 667 | newfile=False): |
|
664 | 668 | repo = Repository.get_by_repo_name(repo) |
|
665 | 669 | _commit = parent |
|
666 | 670 | if not parent: |
|
667 | 671 | _commit = EmptyCommit(alias=vcs_type) |
|
668 | 672 | |
|
669 | 673 | if newfile: |
|
670 | 674 | nodes = { |
|
671 | 675 | filename: { |
|
672 | 676 | 'content': content |
|
673 | 677 | } |
|
674 | 678 | } |
|
675 | 679 | commit = ScmModel().create_nodes( |
|
676 | 680 | user=TEST_USER_ADMIN_LOGIN, repo=repo, |
|
677 | 681 | message=message, |
|
678 | 682 | nodes=nodes, |
|
679 | 683 | parent_commit=_commit, |
|
680 | 684 | author=TEST_USER_ADMIN_LOGIN, |
|
681 | 685 | ) |
|
682 | 686 | else: |
|
683 | 687 | commit = ScmModel().commit_change( |
|
684 | 688 | repo=repo.scm_instance(), repo_name=repo.repo_name, |
|
685 | 689 | commit=parent, user=TEST_USER_ADMIN_LOGIN, |
|
686 | 690 | author=TEST_USER_ADMIN_LOGIN, |
|
687 | 691 | message=message, |
|
688 | 692 | content=content, |
|
689 | 693 | f_path=filename |
|
690 | 694 | ) |
|
691 | 695 | return commit |
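
For orientation, a hedged sketch of how the _commit_change helper above is driven by the compare tests in this file. It is illustrative only: 'some-repo' and 'git' stand in for whatever repository name and backend alias the test fixtures provide, and the helper itself relies on the module's test infrastructure (Repository, ScmModel, TEST_USER_ADMIN_LOGIN).

# Illustrative use of _commit_change; 'some-repo' and 'git' are
# hypothetical stand-ins for a repo and backend supplied by test fixtures.
root = _commit_change(
    'some-repo', filename='file1', content='line1\n',
    message='commit1', vcs_type='git', parent=None, newfile=True)
child = _commit_change(
    'some-repo', filename='file1', content='line1\nline2\n',
    message='commit2', vcs_type='git', parent=root)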
@@ -1,330 +1,330 b'' | |||
|
1 | 1 | # -*- coding: utf-8 -*- |
|
2 | 2 | |
|
3 | 3 | # Copyright (C) 2016-2016 RhodeCode GmbH |
|
4 | 4 | # |
|
5 | 5 | # This program is free software: you can redistribute it and/or modify |
|
6 | 6 | # it under the terms of the GNU Affero General Public License, version 3 |
|
7 | 7 | # (only), as published by the Free Software Foundation. |
|
8 | 8 | # |
|
9 | 9 | # This program is distributed in the hope that it will be useful, |
|
10 | 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
11 | 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|
12 | 12 | # GNU General Public License for more details. |
|
13 | 13 | # |
|
14 | 14 | # You should have received a copy of the GNU Affero General Public License |
|
15 | 15 | # along with this program. If not, see <http://www.gnu.org/licenses/>. |
|
16 | 16 | # |
|
17 | 17 | # This program is dual-licensed. If you wish to learn more about the |
|
18 | 18 | # RhodeCode Enterprise Edition, including its added features, Support services, |
|
19 | 19 | # and proprietary license terms, please see https://rhodecode.com/licenses/ |
|
20 | 20 | |
|
21 | 21 | import pytest |
|
22 | 22 | |
|
23 | 23 | from rhodecode.lib.codeblocks import ( |
|
24 | 24 | tokenize_string, split_token_stream, rollup_tokenstream, |
|
25 | 25 | render_tokenstream) |
|
26 | 26 | from pygments.lexers import get_lexer_by_name |
|
27 | 27 | |
|
28 | 28 | |
|
29 | 29 | class TestTokenizeString(object): |
|
30 | 30 | |
|
31 | 31 | python_code = ''' |
|
32 | 32 | import this |
|
33 | 33 | |
|
34 | 34 | var = 6 |
|
35 | 35 | print "this" |
|
36 | 36 | |
|
37 | 37 | ''' |
|
38 | 38 | |
|
39 | 39 | def test_tokenize_as_python(self): |
|
40 | 40 | lexer = get_lexer_by_name('python') |
|
41 | 41 | tokens = list(tokenize_string(self.python_code, lexer)) |
|
42 | 42 | |
|
43 | 43 | assert tokens == [ |
|
44 | 44 | ('', u'\n'), |
|
45 | 45 | ('', u' '), |
|
46 | 46 | ('kn', u'import'), |
|
47 | 47 | ('', u' '), |
|
48 | 48 | ('nn', u'this'), |
|
49 | 49 | ('', u'\n'), |
|
50 | 50 | ('', u'\n'), |
|
51 | 51 | ('', u' '), |
|
52 | 52 | ('n', u'var'), |
|
53 | 53 | ('', u' '), |
|
54 | 54 | ('o', u'='), |
|
55 | 55 | ('', u' '), |
|
56 | 56 | ('mi', u'6'), |
|
57 | 57 | ('', u'\n'), |
|
58 | 58 | ('', u' '), |
|
59 | 59 | ('k', u'print'), |
|
60 | 60 | ('', u' '), |
|
61 | 61 | ('s2', u'"'), |
|
62 | 62 | ('s2', u'this'), |
|
63 | 63 | ('s2', u'"'), |
|
64 | 64 | ('', u'\n'), |
|
65 | 65 | ('', u'\n'), |
|
66 | 66 | ('', u' ') |
|
67 | 67 | ] |
|
68 | 68 | |
|
69 | 69 | def test_tokenize_as_text(self): |
|
70 | 70 | lexer = get_lexer_by_name('text') |
|
71 | 71 | tokens = list(tokenize_string(self.python_code, lexer)) |
|
72 | 72 | |
|
73 | 73 | assert tokens == [ |
|
74 | 74 | ('', |
|
75 | 75 | u'\n import this\n\n var = 6\n print "this"\n\n ') |
|
76 | 76 | ] |
|
77 | 77 | |
|
78 | 78 | |
|
79 | 79 | class TestSplitTokenStream(object): |
|
80 | 80 | |
|
81 | 81 | def test_split_token_stream(self): |
|
82 | 82 | lines = list(split_token_stream( |
|
83 | 83 | [('type1', 'some\ntext'), ('type2', 'more\n')])) |
|
84 | 84 | |
|
85 | 85 | assert lines == [ |
|
86 | 86 | [('type1', u'some')], |
|
87 | 87 | [('type1', u'text'), ('type2', u'more')], |
|
88 | 88 | [('type2', u'')], |
|
89 | 89 | ] |
|
90 | 90 | |
|
91 | 91 | def test_split_token_stream_other_char(self): |
|
92 | 92 | lines = list(split_token_stream( |
|
93 | 93 | [('type1', 'some\ntext'), ('type2', 'more\n')], |
|
94 | 94 | split_string='m')) |
|
95 | 95 | |
|
96 | 96 | assert lines == [ |
|
97 | 97 | [('type1', 'so')], |
|
98 | 98 | [('type1', 'e\ntext'), ('type2', '')], |
|
99 | 99 | [('type2', 'ore\n')], |
|
100 | 100 | ] |
|
101 | 101 | |
|
102 | 102 | def test_split_token_stream_without_char(self): |
|
103 | 103 | lines = list(split_token_stream( |
|
104 | 104 | [('type1', 'some\ntext'), ('type2', 'more\n')], |
|
105 | 105 | split_string='z')) |
|
106 | 106 | |
|
107 | 107 | assert lines == [ |
|
108 | 108 | [('type1', 'some\ntext'), ('type2', 'more\n')] |
|
109 | 109 | ] |
|
110 | 110 | |
|
111 | 111 | def test_split_token_stream_single(self): |
|
112 | 112 | lines = list(split_token_stream( |
|
113 | 113 | [('type1', '\n')], split_string='\n')) |
|
114 | 114 | |
|
115 | 115 | assert lines == [ |
|
116 | 116 | [('type1', '')], |
|
117 | 117 | [('type1', '')], |
|
118 | 118 | ] |
|
119 | 119 | |
|
120 | 120 | def test_split_token_stream_single_repeat(self): |
|
121 | 121 | lines = list(split_token_stream( |
|
122 | 122 | [('type1', '\n\n\n')], split_string='\n')) |
|
123 | 123 | |
|
124 | 124 | assert lines == [ |
|
125 | 125 | [('type1', '')], |
|
126 | 126 | [('type1', '')], |
|
127 | 127 | [('type1', '')], |
|
128 | 128 | [('type1', '')], |
|
129 | 129 | ] |
|
130 | 130 | |
|
131 | 131 | def test_split_token_stream_multiple_repeat(self): |
|
132 | 132 | lines = list(split_token_stream( |
|
133 | 133 | [('type1', '\n\n'), ('type2', '\n\n')], split_string='\n')) |
|
134 | 134 | |
|
135 | 135 | assert lines == [ |
|
136 | 136 | [('type1', '')], |
|
137 | 137 | [('type1', '')], |
|
138 | 138 | [('type1', ''), ('type2', '')], |
|
139 | 139 | [('type2', '')], |
|
140 | 140 | [('type2', '')], |
|
141 | 141 | ] |
|
142 | 142 | |
|
143 | 143 | |
|
144 | 144 | class TestRollupTokens(object): |
|
145 | 145 | |
|
146 | 146 | @pytest.mark.parametrize('tokenstream,output', [ |
|
147 | 147 | ([], |
|
148 | 148 | []), |
|
149 | 149 | ([('A', 'hell'), ('A', 'o')], [ |
|
150 | 150 | ('A', [ |
|
151 | 151 | ('', 'hello')]), |
|
152 | 152 | ]), |
|
153 | 153 | ([('A', 'hell'), ('B', 'o')], [ |
|
154 | 154 | ('A', [ |
|
155 | 155 | ('', 'hell')]), |
|
156 | 156 | ('B', [ |
|
157 | 157 | ('', 'o')]), |
|
158 | 158 | ]), |
|
159 | 159 | ([('A', 'hel'), ('A', 'lo'), ('B', ' '), ('A', 'there')], [ |
|
160 | 160 | ('A', [ |
|
161 | 161 | ('', 'hello')]), |
|
162 | 162 | ('B', [ |
|
163 | 163 | ('', ' ')]), |
|
164 | 164 | ('A', [ |
|
165 | 165 | ('', 'there')]), |
|
166 | 166 | ]), |
|
167 | 167 | ]) |
|
168 | 168 | def test_rollup_tokenstream_without_ops(self, tokenstream, output): |
|
169 | 169 | assert list(rollup_tokenstream(tokenstream)) == output |
|
170 | 170 | |
|
171 | 171 | @pytest.mark.parametrize('tokenstream,output', [ |
|
172 | 172 | ([], |
|
173 | 173 | []), |
|
174 | 174 | ([('A', '', 'hell'), ('A', '', 'o')], [ |
|
175 | 175 | ('A', [ |
|
176 | 176 | ('', 'hello')]), |
|
177 | 177 | ]), |
|
178 | 178 | ([('A', '', 'hell'), ('B', '', 'o')], [ |
|
179 | 179 | ('A', [ |
|
180 | 180 | ('', 'hell')]), |
|
181 | 181 | ('B', [ |
|
182 | 182 | ('', 'o')]), |
|
183 | 183 | ]), |
|
184 | 184 | ([('A', '', 'h'), ('B', '', 'e'), ('C', '', 'y')], [ |
|
185 | 185 | ('A', [ |
|
186 | 186 | ('', 'h')]), |
|
187 | 187 | ('B', [ |
|
188 | 188 | ('', 'e')]), |
|
189 | 189 | ('C', [ |
|
190 | 190 | ('', 'y')]), |
|
191 | 191 | ]), |
|
192 | 192 | ([('A', '', 'h'), ('A', '', 'e'), ('C', '', 'y')], [ |
|
193 | 193 | ('A', [ |
|
194 | 194 | ('', 'he')]), |
|
195 | 195 | ('C', [ |
|
196 | 196 | ('', 'y')]), |
|
197 | 197 | ]), |
|
198 | 198 | ([('A', 'ins', 'h'), ('A', 'ins', 'e')], [ |
|
199 | 199 | ('A', [ |
|
200 | 200 | ('ins', 'he') |
|
201 | 201 | ]), |
|
202 | 202 | ]), |
|
203 | 203 | ([('A', 'ins', 'h'), ('A', 'del', 'e')], [ |
|
204 | 204 | ('A', [ |
|
205 | 205 | ('ins', 'h'), |
|
206 | 206 | ('del', 'e') |
|
207 | 207 | ]), |
|
208 | 208 | ]), |
|
209 | 209 | ([('A', 'ins', 'h'), ('B', 'del', 'e'), ('B', 'del', 'y')], [ |
|
210 | 210 | ('A', [ |
|
211 | 211 | ('ins', 'h'), |
|
212 | 212 | ]), |
|
213 | 213 | ('B', [ |
|
214 | 214 | ('del', 'ey'), |
|
215 | 215 | ]), |
|
216 | 216 | ]), |
|
217 | 217 | ([('A', 'ins', 'h'), ('A', 'del', 'e'), ('B', 'del', 'y')], [ |
|
218 | 218 | ('A', [ |
|
219 | 219 | ('ins', 'h'), |
|
220 | 220 | ('del', 'e'), |
|
221 | 221 | ]), |
|
222 | 222 | ('B', [ |
|
223 | 223 | ('del', 'y'), |
|
224 | 224 | ]), |
|
225 | 225 | ]), |
|
226 | 226 | ([('A', '', 'some'), ('A', 'ins', 'new'), ('A', '', 'name')], [ |
|
227 | 227 | ('A', [ |
|
228 | 228 | ('', 'some'), |
|
229 | 229 | ('ins', 'new'), |
|
230 | 230 | ('', 'name'), |
|
231 | 231 | ]), |
|
232 | 232 | ]), |
|
233 | 233 | ]) |
|
234 | 234 | def test_rollup_tokenstream_with_ops(self, tokenstream, output): |
|
235 | 235 | assert list(rollup_tokenstream(tokenstream)) == output |
|
236 | 236 | |
|
237 | 237 | |
|
238 | 238 | class TestRenderTokenStream(object): |
|
239 | 239 | |
|
240 | 240 | @pytest.mark.parametrize('tokenstream,output', [ |
|
241 | 241 | ( |
|
242 | 242 | [], |
|
243 | 243 | '', |
|
244 | 244 | ), |
|
245 | 245 | ( |
|
246 | 246 | [('', '', u'')], |
|
247 | 247 | '<span></span>', |
|
248 | 248 | ), |
|
249 | 249 | ( |
|
250 | 250 | [('', '', u'text')], |
|
251 | 251 | '<span>text</span>', |
|
252 | 252 | ), |
|
253 | 253 | ( |
|
254 | 254 | [('A', '', u'')], |
|
255 | 255 | '<span class="A"></span>', |
|
256 | 256 | ), |
|
257 | 257 | ( |
|
258 | 258 | [('A', '', u'hello')], |
|
259 | 259 | '<span class="A">hello</span>', |
|
260 | 260 | ), |
|
261 | 261 | ( |
|
262 | 262 | [('A', '', u'hel'), ('A', '', u'lo')], |
|
263 | 263 | '<span class="A">hello</span>', |
|
264 | 264 | ), |
|
265 | 265 | ( |
|
266 | 266 | [('A', '', u'two\n'), ('A', '', u'lines')], |
|

267 | '<span class="A">two
lines</span>', |

267 | '<span class="A">two\nlines</span>', | 
|
268 | 268 | ), |
|
269 | 269 | ( |
|
270 | 270 | [('A', '', u'\nthree\n'), ('A', '', u'lines')], |
|

271 | '<span class="A">
three
lines</span>', |

271 | '<span class="A">\nthree\nlines</span>', | 
|
272 | 272 | ), |
|
273 | 273 | ( |
|
274 | 274 | [('', '', u'\n'), ('A', '', u'line')], |
|

275 | '<span>
</span><span class="A">line</span>', |

275 | '<span>\n</span><span class="A">line</span>', | 
|
276 | 276 | ), |
|
277 | 277 | ( |
|
278 | 278 | [('', 'ins', u'\n'), ('A', '', u'line')], |
|

279 | '<span><ins>
</ins></span><span class="A">line</span>', |

279 | '<span><ins>\n</ins></span><span class="A">line</span>', | 
|
280 | 280 | ), |
|
281 | 281 | ( |
|
282 | 282 | [('A', '', u'hel'), ('A', 'ins', u'lo')], |
|
283 | 283 | '<span class="A">hel<ins>lo</ins></span>', |
|
284 | 284 | ), |
|
285 | 285 | ( |
|
286 | 286 | [('A', '', u'hel'), ('A', 'ins', u'l'), ('A', 'ins', u'o')], |
|
287 | 287 | '<span class="A">hel<ins>lo</ins></span>', |
|
288 | 288 | ), |
|
289 | 289 | ( |
|
290 | 290 | [('A', '', u'hel'), ('A', 'ins', u'l'), ('A', 'del', u'o')], |
|
291 | 291 | '<span class="A">hel<ins>l</ins><del>o</del></span>', |
|
292 | 292 | ), |
|
293 | 293 | ( |
|
294 | 294 | [('A', '', u'hel'), ('B', '', u'lo')], |
|
295 | 295 | '<span class="A">hel</span><span class="B">lo</span>', |
|
296 | 296 | ), |
|
297 | 297 | ( |
|
298 | 298 | [('A', '', u'hel'), ('B', 'ins', u'lo')], |
|
299 | 299 | '<span class="A">hel</span><span class="B"><ins>lo</ins></span>', |
|
300 | 300 | ), |
|
301 | 301 | ]) |
|
302 | 302 | def test_render_tokenstream_with_ops(self, tokenstream, output): |
|
303 | 303 | html = render_tokenstream(tokenstream) |
|
304 | 304 | assert html == output |
|
305 | 305 | |
|
306 | 306 | @pytest.mark.parametrize('tokenstream,output', [ |
|
307 | 307 | ( |
|
308 | 308 | [('A', u'hel'), ('A', u'lo')], |
|
309 | 309 | '<span class="A">hello</span>', |
|
310 | 310 | ), |
|
311 | 311 | ( |
|
312 | 312 | [('A', u'hel'), ('A', u'l'), ('A', u'o')], |
|
313 | 313 | '<span class="A">hello</span>', |
|
314 | 314 | ), |
|
315 | 315 | ( |
|
316 | 316 | [('A', u'hel'), ('A', u'l'), ('A', u'o')], |
|
317 | 317 | '<span class="A">hello</span>', |
|
318 | 318 | ), |
|
319 | 319 | ( |
|
320 | 320 | [('A', u'hel'), ('B', u'lo')], |
|
321 | 321 | '<span class="A">hel</span><span class="B">lo</span>', |
|
322 | 322 | ), |
|
323 | 323 | ( |
|
324 | 324 | [('A', u'hel'), ('B', u'lo')], |
|
325 | 325 | '<span class="A">hel</span><span class="B">lo</span>', |
|
326 | 326 | ), |
|
327 | 327 | ]) |
|
328 | 328 | def test_render_tokenstream_without_ops(self, tokenstream, output): |
|
329 | 329 | html = render_tokenstream(tokenstream) |
|
330 | 330 | assert html == output |
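
Taken together, the codeblocks tests above exercise a small pipeline: tokenize a string with a Pygments lexer, split the token stream into lines, and render tokens back to HTML spans. A minimal sketch, assuming the rhodecode.lib.codeblocks helpers behave exactly as the assertions above describe:

# Minimal sketch of the tokenstream pipeline covered by these tests;
# the helpers' behaviour is assumed to match the assertions above.
from pygments.lexers import get_lexer_by_name

from rhodecode.lib.codeblocks import (
    tokenize_string, split_token_stream, render_tokenstream)

lexer = get_lexer_by_name('python')
# tokenize_string yields (token_class, text) pairs, e.g. ('kn', u'import').
tokens = list(tokenize_string(u'import this\n', lexer))
# split_token_stream turns the flat stream into one token list per line.
lines = list(split_token_stream(tokens, split_string='\n'))
# render_tokenstream renders one line back to HTML, e.g.
# '<span class="kn">import</span><span> </span><span class="nn">this</span>'.
html = render_tokenstream(lines[0])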