##// END OF EJS Templates
py3: use s.startswith() instead of s[n] while parsing patches...
Yuya Nishihara -
r37489:51d5e1ff default
parent child Browse files
Show More
@@ -1,422 +1,423 b''
1 test-abort-checkin.t
1 test-abort-checkin.t
2 test-add.t
2 test-add.t
3 test-addremove-similar.t
3 test-addremove-similar.t
4 test-addremove.t
4 test-addremove.t
5 test-amend-subrepo.t
5 test-amend-subrepo.t
6 test-amend.t
6 test-amend.t
7 test-ancestor.py
7 test-ancestor.py
8 test-annotate.py
8 test-annotate.py
9 test-annotate.t
9 test-annotate.t
10 test-archive-symlinks.t
10 test-archive-symlinks.t
11 test-atomictempfile.py
11 test-atomictempfile.py
12 test-audit-path.t
12 test-audit-path.t
13 test-audit-subrepo.t
13 test-audit-subrepo.t
14 test-automv.t
14 test-automv.t
15 test-backout.t
15 test-backout.t
16 test-backwards-remove.t
16 test-backwards-remove.t
17 test-basic.t
17 test-basic.t
18 test-bheads.t
18 test-bheads.t
19 test-bisect.t
19 test-bisect.t
20 test-bisect2.t
20 test-bisect2.t
21 test-bisect3.t
21 test-bisect3.t
22 test-blackbox.t
22 test-blackbox.t
23 test-bookmarks-current.t
23 test-bookmarks-current.t
24 test-bookmarks-merge.t
24 test-bookmarks-merge.t
25 test-bookmarks-rebase.t
25 test-bookmarks-rebase.t
26 test-bookmarks-strip.t
26 test-bookmarks-strip.t
27 test-bookmarks.t
27 test-bookmarks.t
28 test-branch-change.t
28 test-branch-change.t
29 test-branch-option.t
29 test-branch-option.t
30 test-branch-tag-confict.t
30 test-branch-tag-confict.t
31 test-branches.t
31 test-branches.t
32 test-bundle-phases.t
32 test-bundle-phases.t
33 test-bundle-type.t
33 test-bundle-type.t
34 test-bundle-vs-outgoing.t
34 test-bundle-vs-outgoing.t
35 test-bundle2-multiple-changegroups.t
35 test-bundle2-multiple-changegroups.t
36 test-cappedreader.py
36 test-cappedreader.py
37 test-casecollision.t
37 test-casecollision.t
38 test-cat.t
38 test-cat.t
39 test-censor.t
39 test-censor.t
40 test-changelog-exec.t
40 test-changelog-exec.t
41 test-check-commit.t
41 test-check-commit.t
42 test-check-execute.t
42 test-check-execute.t
43 test-check-module-imports.t
43 test-check-module-imports.t
44 test-check-pyflakes.t
44 test-check-pyflakes.t
45 test-check-pylint.t
45 test-check-pylint.t
46 test-check-shbang.t
46 test-check-shbang.t
47 test-children.t
47 test-children.t
48 test-clone-pull-corruption.t
48 test-clone-pull-corruption.t
49 test-clone-r.t
49 test-clone-r.t
50 test-clone-update-order.t
50 test-clone-update-order.t
51 test-command-template.t
51 test-command-template.t
52 test-commit-amend.t
52 test-commit-amend.t
53 test-commit-interactive.t
53 test-commit-interactive.t
54 test-commit-multiple.t
54 test-commit-multiple.t
55 test-commit-unresolved.t
55 test-commit-unresolved.t
56 test-commit.t
56 test-commit.t
57 test-committer.t
57 test-committer.t
58 test-completion.t
58 test-completion.t
59 test-config-env.py
59 test-config-env.py
60 test-config.t
60 test-config.t
61 test-conflict.t
61 test-conflict.t
62 test-confused-revert.t
62 test-confused-revert.t
63 test-contrib-check-code.t
63 test-contrib-check-code.t
64 test-contrib-check-commit.t
64 test-contrib-check-commit.t
65 test-convert-authormap.t
65 test-convert-authormap.t
66 test-convert-clonebranches.t
66 test-convert-clonebranches.t
67 test-convert-datesort.t
67 test-convert-datesort.t
68 test-convert-filemap.t
68 test-convert-filemap.t
69 test-convert-hg-sink.t
69 test-convert-hg-sink.t
70 test-convert-hg-source.t
70 test-convert-hg-source.t
71 test-convert-hg-startrev.t
71 test-convert-hg-startrev.t
72 test-copy-move-merge.t
72 test-copy-move-merge.t
73 test-copy.t
73 test-copy.t
74 test-copytrace-heuristics.t
74 test-copytrace-heuristics.t
75 test-debugbuilddag.t
75 test-debugbuilddag.t
76 test-debugbundle.t
76 test-debugbundle.t
77 test-debugextensions.t
77 test-debugextensions.t
78 test-debugindexdot.t
78 test-debugindexdot.t
79 test-debugrename.t
79 test-debugrename.t
80 test-default-push.t
80 test-default-push.t
81 test-diff-binary-file.t
81 test-diff-binary-file.t
82 test-diff-change.t
82 test-diff-change.t
83 test-diff-copy-depth.t
83 test-diff-copy-depth.t
84 test-diff-hashes.t
84 test-diff-hashes.t
85 test-diff-ignore-whitespace.t
85 test-diff-ignore-whitespace.t
86 test-diff-indent-heuristic.t
86 test-diff-indent-heuristic.t
87 test-diff-issue2761.t
87 test-diff-issue2761.t
88 test-diff-newlines.t
88 test-diff-newlines.t
89 test-diff-reverse.t
89 test-diff-reverse.t
90 test-diff-subdir.t
90 test-diff-subdir.t
91 test-diff-unified.t
91 test-diff-unified.t
92 test-diff-upgrade.t
92 test-diff-upgrade.t
93 test-diffdir.t
93 test-diffdir.t
94 test-directaccess.t
94 test-directaccess.t
95 test-dirstate-backup.t
95 test-dirstate-backup.t
96 test-dirstate-nonnormalset.t
96 test-dirstate-nonnormalset.t
97 test-doctest.py
97 test-doctest.py
98 test-double-merge.t
98 test-double-merge.t
99 test-drawdag.t
99 test-drawdag.t
100 test-duplicateoptions.py
100 test-duplicateoptions.py
101 test-editor-filename.t
101 test-editor-filename.t
102 test-empty-dir.t
102 test-empty-dir.t
103 test-empty-file.t
103 test-empty-file.t
104 test-empty-group.t
104 test-empty-group.t
105 test-empty.t
105 test-empty.t
106 test-encode.t
106 test-encode.t
107 test-encoding-func.py
107 test-encoding-func.py
108 test-encoding.t
108 test-encoding.t
109 test-eol-add.t
109 test-eol-add.t
110 test-eol-clone.t
110 test-eol-clone.t
111 test-eol-hook.t
111 test-eol-hook.t
112 test-eol-tag.t
112 test-eol-tag.t
113 test-eol-update.t
113 test-eol-update.t
114 test-excessive-merge.t
114 test-excessive-merge.t
115 test-exchange-obsmarkers-case-A1.t
115 test-exchange-obsmarkers-case-A1.t
116 test-exchange-obsmarkers-case-A2.t
116 test-exchange-obsmarkers-case-A2.t
117 test-exchange-obsmarkers-case-A3.t
117 test-exchange-obsmarkers-case-A3.t
118 test-exchange-obsmarkers-case-A4.t
118 test-exchange-obsmarkers-case-A4.t
119 test-exchange-obsmarkers-case-A5.t
119 test-exchange-obsmarkers-case-A5.t
120 test-exchange-obsmarkers-case-A6.t
120 test-exchange-obsmarkers-case-A6.t
121 test-exchange-obsmarkers-case-A7.t
121 test-exchange-obsmarkers-case-A7.t
122 test-exchange-obsmarkers-case-B1.t
122 test-exchange-obsmarkers-case-B1.t
123 test-exchange-obsmarkers-case-B2.t
123 test-exchange-obsmarkers-case-B2.t
124 test-exchange-obsmarkers-case-B3.t
124 test-exchange-obsmarkers-case-B3.t
125 test-exchange-obsmarkers-case-B4.t
125 test-exchange-obsmarkers-case-B4.t
126 test-exchange-obsmarkers-case-B5.t
126 test-exchange-obsmarkers-case-B5.t
127 test-exchange-obsmarkers-case-B6.t
127 test-exchange-obsmarkers-case-B6.t
128 test-exchange-obsmarkers-case-B7.t
128 test-exchange-obsmarkers-case-B7.t
129 test-exchange-obsmarkers-case-C1.t
129 test-exchange-obsmarkers-case-C1.t
130 test-exchange-obsmarkers-case-C2.t
130 test-exchange-obsmarkers-case-C2.t
131 test-exchange-obsmarkers-case-C3.t
131 test-exchange-obsmarkers-case-C3.t
132 test-exchange-obsmarkers-case-C4.t
132 test-exchange-obsmarkers-case-C4.t
133 test-exchange-obsmarkers-case-D1.t
133 test-exchange-obsmarkers-case-D1.t
134 test-exchange-obsmarkers-case-D2.t
134 test-exchange-obsmarkers-case-D2.t
135 test-exchange-obsmarkers-case-D3.t
135 test-exchange-obsmarkers-case-D3.t
136 test-exchange-obsmarkers-case-D4.t
136 test-exchange-obsmarkers-case-D4.t
137 test-execute-bit.t
137 test-execute-bit.t
138 test-extdiff.t
138 test-extdiff.t
139 test-extra-filelog-entry.t
139 test-extra-filelog-entry.t
140 test-filebranch.t
140 test-filebranch.t
141 test-fileset-generated.t
141 test-fileset-generated.t
142 test-flags.t
142 test-flags.t
143 test-generaldelta.t
143 test-generaldelta.t
144 test-getbundle.t
144 test-getbundle.t
145 test-git-export.t
145 test-git-export.t
146 test-glog-topological.t
146 test-glog-topological.t
147 test-gpg.t
147 test-gpg.t
148 test-graft.t
148 test-graft.t
149 test-hghave.t
149 test-hghave.t
150 test-hgignore.t
150 test-hgignore.t
151 test-hgk.t
151 test-hgk.t
152 test-hgweb-bundle.t
152 test-hgweb-bundle.t
153 test-hgweb-descend-empties.t
153 test-hgweb-descend-empties.t
154 test-hgweb-removed.t
154 test-hgweb-removed.t
155 test-histedit-arguments.t
155 test-histedit-arguments.t
156 test-histedit-base.t
156 test-histedit-base.t
157 test-histedit-bookmark-motion.t
157 test-histedit-bookmark-motion.t
158 test-histedit-commute.t
158 test-histedit-commute.t
159 test-histedit-drop.t
159 test-histedit-drop.t
160 test-histedit-edit.t
160 test-histedit-edit.t
161 test-histedit-fold-non-commute.t
161 test-histedit-fold-non-commute.t
162 test-histedit-fold.t
162 test-histedit-fold.t
163 test-histedit-no-change.t
163 test-histedit-no-change.t
164 test-histedit-non-commute-abort.t
164 test-histedit-non-commute-abort.t
165 test-histedit-non-commute.t
165 test-histedit-non-commute.t
166 test-histedit-obsolete.t
166 test-histedit-obsolete.t
167 test-histedit-outgoing.t
167 test-histedit-outgoing.t
168 test-histedit-templates.t
168 test-histedit-templates.t
169 test-http-branchmap.t
169 test-http-branchmap.t
170 test-http-bundle1.t
170 test-http-bundle1.t
171 test-http-clone-r.t
171 test-http-clone-r.t
172 test-identify.t
172 test-identify.t
173 test-import-unknown.t
173 test-import-unknown.t
174 test-import.t
174 test-imports-checker.t
175 test-imports-checker.t
175 test-inherit-mode.t
176 test-inherit-mode.t
176 test-issue1089.t
177 test-issue1089.t
177 test-issue1102.t
178 test-issue1102.t
178 test-issue1175.t
179 test-issue1175.t
179 test-issue1306.t
180 test-issue1306.t
180 test-issue1438.t
181 test-issue1438.t
181 test-issue1502.t
182 test-issue1502.t
182 test-issue1802.t
183 test-issue1802.t
183 test-issue1877.t
184 test-issue1877.t
184 test-issue1993.t
185 test-issue1993.t
185 test-issue2137.t
186 test-issue2137.t
186 test-issue3084.t
187 test-issue3084.t
187 test-issue4074.t
188 test-issue4074.t
188 test-issue522.t
189 test-issue522.t
189 test-issue586.t
190 test-issue586.t
190 test-issue612.t
191 test-issue612.t
191 test-issue619.t
192 test-issue619.t
192 test-issue660.t
193 test-issue660.t
193 test-issue672.t
194 test-issue672.t
194 test-issue842.t
195 test-issue842.t
195 test-journal-exists.t
196 test-journal-exists.t
196 test-journal-share.t
197 test-journal-share.t
197 test-journal.t
198 test-journal.t
198 test-largefiles-cache.t
199 test-largefiles-cache.t
199 test-largefiles-misc.t
200 test-largefiles-misc.t
200 test-largefiles-small-disk.t
201 test-largefiles-small-disk.t
201 test-largefiles-update.t
202 test-largefiles-update.t
202 test-lfs-largefiles.t
203 test-lfs-largefiles.t
203 test-locate.t
204 test-locate.t
204 test-lock-badness.t
205 test-lock-badness.t
205 test-log-linerange.t
206 test-log-linerange.t
206 test-log.t
207 test-log.t
207 test-logexchange.t
208 test-logexchange.t
208 test-lrucachedict.py
209 test-lrucachedict.py
209 test-mactext.t
210 test-mactext.t
210 test-mailmap.t
211 test-mailmap.t
211 test-manifest-merging.t
212 test-manifest-merging.t
212 test-manifest.py
213 test-manifest.py
213 test-manifest.t
214 test-manifest.t
214 test-match.py
215 test-match.py
215 test-mdiff.py
216 test-mdiff.py
216 test-merge-changedelete.t
217 test-merge-changedelete.t
217 test-merge-closedheads.t
218 test-merge-closedheads.t
218 test-merge-commit.t
219 test-merge-commit.t
219 test-merge-criss-cross.t
220 test-merge-criss-cross.t
220 test-merge-default.t
221 test-merge-default.t
221 test-merge-force.t
222 test-merge-force.t
222 test-merge-halt.t
223 test-merge-halt.t
223 test-merge-internal-tools-pattern.t
224 test-merge-internal-tools-pattern.t
224 test-merge-local.t
225 test-merge-local.t
225 test-merge-remove.t
226 test-merge-remove.t
226 test-merge-revert.t
227 test-merge-revert.t
227 test-merge-revert2.t
228 test-merge-revert2.t
228 test-merge-subrepos.t
229 test-merge-subrepos.t
229 test-merge-symlinks.t
230 test-merge-symlinks.t
230 test-merge-tools.t
231 test-merge-tools.t
231 test-merge-types.t
232 test-merge-types.t
232 test-merge1.t
233 test-merge1.t
233 test-merge10.t
234 test-merge10.t
234 test-merge2.t
235 test-merge2.t
235 test-merge4.t
236 test-merge4.t
236 test-merge5.t
237 test-merge5.t
237 test-merge6.t
238 test-merge6.t
238 test-merge7.t
239 test-merge7.t
239 test-merge8.t
240 test-merge8.t
240 test-merge9.t
241 test-merge9.t
241 test-mq-git.t
242 test-mq-git.t
242 test-mq-header-date.t
243 test-mq-header-date.t
243 test-mq-header-from.t
244 test-mq-header-from.t
244 test-mq-pull-from-bundle.t
245 test-mq-pull-from-bundle.t
245 test-mq-qdiff.t
246 test-mq-qdiff.t
246 test-mq-qfold.t
247 test-mq-qfold.t
247 test-mq-qgoto.t
248 test-mq-qgoto.t
248 test-mq-qimport-fail-cleanup.t
249 test-mq-qimport-fail-cleanup.t
249 test-mq-qpush-exact.t
250 test-mq-qpush-exact.t
250 test-mq-qqueue.t
251 test-mq-qqueue.t
251 test-mq-qrefresh-interactive.t
252 test-mq-qrefresh-interactive.t
252 test-mq-qrefresh-replace-log-message.t
253 test-mq-qrefresh-replace-log-message.t
253 test-mq-qrefresh.t
254 test-mq-qrefresh.t
254 test-mq-qrename.t
255 test-mq-qrename.t
255 test-mq-qsave.t
256 test-mq-qsave.t
256 test-mq-safety.t
257 test-mq-safety.t
257 test-mq-subrepo.t
258 test-mq-subrepo.t
258 test-mq-symlinks.t
259 test-mq-symlinks.t
259 test-mv-cp-st-diff.t
260 test-mv-cp-st-diff.t
260 test-narrow-archive.t
261 test-narrow-archive.t
261 test-narrow-clone-no-ellipsis.t
262 test-narrow-clone-no-ellipsis.t
262 test-narrow-clone-nonlinear.t
263 test-narrow-clone-nonlinear.t
263 test-narrow-clone.t
264 test-narrow-clone.t
264 test-narrow-commit.t
265 test-narrow-commit.t
265 test-narrow-copies.t
266 test-narrow-copies.t
266 test-narrow-debugcommands.t
267 test-narrow-debugcommands.t
267 test-narrow-debugrebuilddirstate.t
268 test-narrow-debugrebuilddirstate.t
268 test-narrow-exchange-merges.t
269 test-narrow-exchange-merges.t
269 test-narrow-exchange.t
270 test-narrow-exchange.t
270 test-narrow-expanddirstate.t
271 test-narrow-expanddirstate.t
271 test-narrow-merge.t
272 test-narrow-merge.t
272 test-narrow-patch.t
273 test-narrow-patch.t
273 test-narrow-patterns.t
274 test-narrow-patterns.t
274 test-narrow-pull.t
275 test-narrow-pull.t
275 test-narrow-rebase.t
276 test-narrow-rebase.t
276 test-narrow-shallow-merges.t
277 test-narrow-shallow-merges.t
277 test-narrow-shallow.t
278 test-narrow-shallow.t
278 test-narrow-strip.t
279 test-narrow-strip.t
279 test-narrow-update.t
280 test-narrow-update.t
280 test-nested-repo.t
281 test-nested-repo.t
281 test-newbranch.t
282 test-newbranch.t
282 test-obshistory.t
283 test-obshistory.t
283 test-obsmarker-template.t
284 test-obsmarker-template.t
284 test-obsmarkers-effectflag.t
285 test-obsmarkers-effectflag.t
285 test-obsolete-bundle-strip.t
286 test-obsolete-bundle-strip.t
286 test-obsolete-changeset-exchange.t
287 test-obsolete-changeset-exchange.t
287 test-obsolete-checkheads.t
288 test-obsolete-checkheads.t
288 test-obsolete-distributed.t
289 test-obsolete-distributed.t
289 test-obsolete-tag-cache.t
290 test-obsolete-tag-cache.t
290 test-parents.t
291 test-parents.t
291 test-pathconflicts-merge.t
292 test-pathconflicts-merge.t
292 test-pathconflicts-update.t
293 test-pathconflicts-update.t
293 test-pending.t
294 test-pending.t
294 test-permissions.t
295 test-permissions.t
295 test-phases.t
296 test-phases.t
296 test-pull-branch.t
297 test-pull-branch.t
297 test-pull-http.t
298 test-pull-http.t
298 test-pull-permission.t
299 test-pull-permission.t
299 test-pull-pull-corruption.t
300 test-pull-pull-corruption.t
300 test-pull-r.t
301 test-pull-r.t
301 test-pull-update.t
302 test-pull-update.t
302 test-purge.t
303 test-purge.t
303 test-push-checkheads-partial-C1.t
304 test-push-checkheads-partial-C1.t
304 test-push-checkheads-partial-C2.t
305 test-push-checkheads-partial-C2.t
305 test-push-checkheads-partial-C3.t
306 test-push-checkheads-partial-C3.t
306 test-push-checkheads-partial-C4.t
307 test-push-checkheads-partial-C4.t
307 test-push-checkheads-pruned-B1.t
308 test-push-checkheads-pruned-B1.t
308 test-push-checkheads-pruned-B2.t
309 test-push-checkheads-pruned-B2.t
309 test-push-checkheads-pruned-B3.t
310 test-push-checkheads-pruned-B3.t
310 test-push-checkheads-pruned-B4.t
311 test-push-checkheads-pruned-B4.t
311 test-push-checkheads-pruned-B5.t
312 test-push-checkheads-pruned-B5.t
312 test-push-checkheads-pruned-B6.t
313 test-push-checkheads-pruned-B6.t
313 test-push-checkheads-pruned-B7.t
314 test-push-checkheads-pruned-B7.t
314 test-push-checkheads-pruned-B8.t
315 test-push-checkheads-pruned-B8.t
315 test-push-checkheads-superceed-A1.t
316 test-push-checkheads-superceed-A1.t
316 test-push-checkheads-superceed-A2.t
317 test-push-checkheads-superceed-A2.t
317 test-push-checkheads-superceed-A3.t
318 test-push-checkheads-superceed-A3.t
318 test-push-checkheads-superceed-A4.t
319 test-push-checkheads-superceed-A4.t
319 test-push-checkheads-superceed-A5.t
320 test-push-checkheads-superceed-A5.t
320 test-push-checkheads-superceed-A6.t
321 test-push-checkheads-superceed-A6.t
321 test-push-checkheads-superceed-A7.t
322 test-push-checkheads-superceed-A7.t
322 test-push-checkheads-superceed-A8.t
323 test-push-checkheads-superceed-A8.t
323 test-push-checkheads-unpushed-D1.t
324 test-push-checkheads-unpushed-D1.t
324 test-push-checkheads-unpushed-D2.t
325 test-push-checkheads-unpushed-D2.t
325 test-push-checkheads-unpushed-D3.t
326 test-push-checkheads-unpushed-D3.t
326 test-push-checkheads-unpushed-D4.t
327 test-push-checkheads-unpushed-D4.t
327 test-push-checkheads-unpushed-D5.t
328 test-push-checkheads-unpushed-D5.t
328 test-push-checkheads-unpushed-D6.t
329 test-push-checkheads-unpushed-D6.t
329 test-push-checkheads-unpushed-D7.t
330 test-push-checkheads-unpushed-D7.t
330 test-push-http.t
331 test-push-http.t
331 test-push-warn.t
332 test-push-warn.t
332 test-pushvars.t
333 test-pushvars.t
333 test-rebase-abort.t
334 test-rebase-abort.t
334 test-rebase-base-flag.t
335 test-rebase-base-flag.t
335 test-rebase-bookmarks.t
336 test-rebase-bookmarks.t
336 test-rebase-brute-force.t
337 test-rebase-brute-force.t
337 test-rebase-cache.t
338 test-rebase-cache.t
338 test-rebase-check-restore.t
339 test-rebase-check-restore.t
339 test-rebase-collapse.t
340 test-rebase-collapse.t
340 test-rebase-conflicts.t
341 test-rebase-conflicts.t
341 test-rebase-dest.t
342 test-rebase-dest.t
342 test-rebase-detach.t
343 test-rebase-detach.t
343 test-rebase-emptycommit.t
344 test-rebase-emptycommit.t
344 test-rebase-inmemory.t
345 test-rebase-inmemory.t
345 test-rebase-interruptions.t
346 test-rebase-interruptions.t
346 test-rebase-issue-noparam-single-rev.t
347 test-rebase-issue-noparam-single-rev.t
347 test-rebase-legacy.t
348 test-rebase-legacy.t
348 test-rebase-mq-skip.t
349 test-rebase-mq-skip.t
349 test-rebase-named-branches.t
350 test-rebase-named-branches.t
350 test-rebase-newancestor.t
351 test-rebase-newancestor.t
351 test-rebase-obsolete.t
352 test-rebase-obsolete.t
352 test-rebase-parameters.t
353 test-rebase-parameters.t
353 test-rebase-partial.t
354 test-rebase-partial.t
354 test-rebase-pull.t
355 test-rebase-pull.t
355 test-rebase-rename.t
356 test-rebase-rename.t
356 test-rebase-scenario-global.t
357 test-rebase-scenario-global.t
357 test-rebase-templates.t
358 test-rebase-templates.t
358 test-rebase-transaction.t
359 test-rebase-transaction.t
359 test-record.t
360 test-record.t
360 test-relink.t
361 test-relink.t
361 test-remove.t
362 test-remove.t
362 test-rename-after-merge.t
363 test-rename-after-merge.t
363 test-rename-dir-merge.t
364 test-rename-dir-merge.t
364 test-rename-merge1.t
365 test-rename-merge1.t
365 test-rename.t
366 test-rename.t
366 test-repair-strip.t
367 test-repair-strip.t
367 test-repo-compengines.t
368 test-repo-compengines.t
368 test-resolve.t
369 test-resolve.t
369 test-revert-flags.t
370 test-revert-flags.t
370 test-revert-unknown.t
371 test-revert-unknown.t
371 test-revlog-ancestry.py
372 test-revlog-ancestry.py
372 test-revlog-group-emptyiter.t
373 test-revlog-group-emptyiter.t
373 test-revlog-mmapindex.t
374 test-revlog-mmapindex.t
374 test-revlog-packentry.t
375 test-revlog-packentry.t
375 test-revset-dirstate-parents.t
376 test-revset-dirstate-parents.t
376 test-revset-outgoing.t
377 test-revset-outgoing.t
377 test-rollback.t
378 test-rollback.t
378 test-run-tests.py
379 test-run-tests.py
379 test-schemes.t
380 test-schemes.t
380 test-serve.t
381 test-serve.t
381 test-share.t
382 test-share.t
382 test-show-stack.t
383 test-show-stack.t
383 test-show-work.t
384 test-show-work.t
384 test-show.t
385 test-show.t
385 test-simple-update.t
386 test-simple-update.t
386 test-single-head.t
387 test-single-head.t
387 test-sparse-clear.t
388 test-sparse-clear.t
388 test-sparse-merges.t
389 test-sparse-merges.t
389 test-sparse-requirement.t
390 test-sparse-requirement.t
390 test-sparse-verbose-json.t
391 test-sparse-verbose-json.t
391 test-ssh-clone-r.t
392 test-ssh-clone-r.t
392 test-ssh-proto.t
393 test-ssh-proto.t
393 test-sshserver.py
394 test-sshserver.py
394 test-stack.t
395 test-stack.t
395 test-status-rev.t
396 test-status-rev.t
396 test-status-terse.t
397 test-status-terse.t
397 test-strip-cross.t
398 test-strip-cross.t
398 test-strip.t
399 test-strip.t
399 test-subrepo-deep-nested-change.t
400 test-subrepo-deep-nested-change.t
400 test-subrepo.t
401 test-subrepo.t
401 test-symlinks.t
402 test-symlinks.t
402 test-tag.t
403 test-tag.t
403 test-tags.t
404 test-tags.t
404 test-template-engine.t
405 test-template-engine.t
405 test-treemanifest.t
406 test-treemanifest.t
406 test-unamend.t
407 test-unamend.t
407 test-uncommit.t
408 test-uncommit.t
408 test-unified-test.t
409 test-unified-test.t
409 test-unrelated-pull.t
410 test-unrelated-pull.t
410 test-up-local-change.t
411 test-up-local-change.t
411 test-update-branches.t
412 test-update-branches.t
412 test-update-dest.t
413 test-update-dest.t
413 test-update-issue1456.t
414 test-update-issue1456.t
414 test-update-names.t
415 test-update-names.t
415 test-update-reverse.t
416 test-update-reverse.t
416 test-upgrade-repo.t
417 test-upgrade-repo.t
417 test-url-rev.t
418 test-url-rev.t
418 test-username-newline.t
419 test-username-newline.t
419 test-verify.t
420 test-verify.t
420 test-websub.t
421 test-websub.t
421 test-win32text.t
422 test-win32text.t
422 test-xdg.t
423 test-xdg.t
@@ -1,2912 +1,2914 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import, print_function
9 from __future__ import absolute_import, print_function
10
10
11 import collections
11 import collections
12 import copy
12 import copy
13 import difflib
13 import difflib
14 import email
14 import email
15 import errno
15 import errno
16 import hashlib
16 import hashlib
17 import os
17 import os
18 import posixpath
18 import posixpath
19 import re
19 import re
20 import shutil
20 import shutil
21 import tempfile
21 import tempfile
22 import zlib
22 import zlib
23
23
24 from .i18n import _
24 from .i18n import _
25 from .node import (
25 from .node import (
26 hex,
26 hex,
27 short,
27 short,
28 )
28 )
29 from . import (
29 from . import (
30 copies,
30 copies,
31 encoding,
31 encoding,
32 error,
32 error,
33 mail,
33 mail,
34 mdiff,
34 mdiff,
35 pathutil,
35 pathutil,
36 policy,
36 policy,
37 pycompat,
37 pycompat,
38 scmutil,
38 scmutil,
39 similar,
39 similar,
40 util,
40 util,
41 vfs as vfsmod,
41 vfs as vfsmod,
42 )
42 )
43 from .utils import (
43 from .utils import (
44 dateutil,
44 dateutil,
45 procutil,
45 procutil,
46 stringutil,
46 stringutil,
47 )
47 )
48
48
49 diffhelpers = policy.importmod(r'diffhelpers')
49 diffhelpers = policy.importmod(r'diffhelpers')
50 stringio = util.stringio
50 stringio = util.stringio
51
51
52 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
52 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
53 tabsplitter = re.compile(br'(\t+|[^\t]+)')
53 tabsplitter = re.compile(br'(\t+|[^\t]+)')
54 _nonwordre = re.compile(br'([^a-zA-Z0-9_\x80-\xff])')
54 _nonwordre = re.compile(br'([^a-zA-Z0-9_\x80-\xff])')
55
55
56 PatchError = error.PatchError
56 PatchError = error.PatchError
57
57
58 # public functions
58 # public functions
59
59
60 def split(stream):
60 def split(stream):
61 '''return an iterator of individual patches from a stream'''
61 '''return an iterator of individual patches from a stream'''
62 def isheader(line, inheader):
62 def isheader(line, inheader):
63 if inheader and line[0] in (' ', '\t'):
63 if inheader and line.startswith((' ', '\t')):
64 # continuation
64 # continuation
65 return True
65 return True
66 if line[0] in (' ', '-', '+'):
66 if line.startswith((' ', '-', '+')):
67 # diff line - don't check for header pattern in there
67 # diff line - don't check for header pattern in there
68 return False
68 return False
69 l = line.split(': ', 1)
69 l = line.split(': ', 1)
70 return len(l) == 2 and ' ' not in l[0]
70 return len(l) == 2 and ' ' not in l[0]
71
71
72 def chunk(lines):
72 def chunk(lines):
73 return stringio(''.join(lines))
73 return stringio(''.join(lines))
74
74
75 def hgsplit(stream, cur):
75 def hgsplit(stream, cur):
76 inheader = True
76 inheader = True
77
77
78 for line in stream:
78 for line in stream:
79 if not line.strip():
79 if not line.strip():
80 inheader = False
80 inheader = False
81 if not inheader and line.startswith('# HG changeset patch'):
81 if not inheader and line.startswith('# HG changeset patch'):
82 yield chunk(cur)
82 yield chunk(cur)
83 cur = []
83 cur = []
84 inheader = True
84 inheader = True
85
85
86 cur.append(line)
86 cur.append(line)
87
87
88 if cur:
88 if cur:
89 yield chunk(cur)
89 yield chunk(cur)
90
90
91 def mboxsplit(stream, cur):
91 def mboxsplit(stream, cur):
92 for line in stream:
92 for line in stream:
93 if line.startswith('From '):
93 if line.startswith('From '):
94 for c in split(chunk(cur[1:])):
94 for c in split(chunk(cur[1:])):
95 yield c
95 yield c
96 cur = []
96 cur = []
97
97
98 cur.append(line)
98 cur.append(line)
99
99
100 if cur:
100 if cur:
101 for c in split(chunk(cur[1:])):
101 for c in split(chunk(cur[1:])):
102 yield c
102 yield c
103
103
104 def mimesplit(stream, cur):
104 def mimesplit(stream, cur):
105 def msgfp(m):
105 def msgfp(m):
106 fp = stringio()
106 fp = stringio()
107 g = email.Generator.Generator(fp, mangle_from_=False)
107 g = email.Generator.Generator(fp, mangle_from_=False)
108 g.flatten(m)
108 g.flatten(m)
109 fp.seek(0)
109 fp.seek(0)
110 return fp
110 return fp
111
111
112 for line in stream:
112 for line in stream:
113 cur.append(line)
113 cur.append(line)
114 c = chunk(cur)
114 c = chunk(cur)
115
115
116 m = pycompat.emailparser().parse(c)
116 m = pycompat.emailparser().parse(c)
117 if not m.is_multipart():
117 if not m.is_multipart():
118 yield msgfp(m)
118 yield msgfp(m)
119 else:
119 else:
120 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
120 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
121 for part in m.walk():
121 for part in m.walk():
122 ct = part.get_content_type()
122 ct = part.get_content_type()
123 if ct not in ok_types:
123 if ct not in ok_types:
124 continue
124 continue
125 yield msgfp(part)
125 yield msgfp(part)
126
126
127 def headersplit(stream, cur):
127 def headersplit(stream, cur):
128 inheader = False
128 inheader = False
129
129
130 for line in stream:
130 for line in stream:
131 if not inheader and isheader(line, inheader):
131 if not inheader and isheader(line, inheader):
132 yield chunk(cur)
132 yield chunk(cur)
133 cur = []
133 cur = []
134 inheader = True
134 inheader = True
135 if inheader and not isheader(line, inheader):
135 if inheader and not isheader(line, inheader):
136 inheader = False
136 inheader = False
137
137
138 cur.append(line)
138 cur.append(line)
139
139
140 if cur:
140 if cur:
141 yield chunk(cur)
141 yield chunk(cur)
142
142
143 def remainder(cur):
143 def remainder(cur):
144 yield chunk(cur)
144 yield chunk(cur)
145
145
146 class fiter(object):
146 class fiter(object):
147 def __init__(self, fp):
147 def __init__(self, fp):
148 self.fp = fp
148 self.fp = fp
149
149
150 def __iter__(self):
150 def __iter__(self):
151 return self
151 return self
152
152
153 def next(self):
153 def next(self):
154 l = self.fp.readline()
154 l = self.fp.readline()
155 if not l:
155 if not l:
156 raise StopIteration
156 raise StopIteration
157 return l
157 return l
158
158
159 __next__ = next
159 __next__ = next
160
160
161 inheader = False
161 inheader = False
162 cur = []
162 cur = []
163
163
164 mimeheaders = ['content-type']
164 mimeheaders = ['content-type']
165
165
166 if not util.safehasattr(stream, 'next'):
166 if not util.safehasattr(stream, 'next'):
167 # http responses, for example, have readline but not next
167 # http responses, for example, have readline but not next
168 stream = fiter(stream)
168 stream = fiter(stream)
169
169
170 for line in stream:
170 for line in stream:
171 cur.append(line)
171 cur.append(line)
172 if line.startswith('# HG changeset patch'):
172 if line.startswith('# HG changeset patch'):
173 return hgsplit(stream, cur)
173 return hgsplit(stream, cur)
174 elif line.startswith('From '):
174 elif line.startswith('From '):
175 return mboxsplit(stream, cur)
175 return mboxsplit(stream, cur)
176 elif isheader(line, inheader):
176 elif isheader(line, inheader):
177 inheader = True
177 inheader = True
178 if line.split(':', 1)[0].lower() in mimeheaders:
178 if line.split(':', 1)[0].lower() in mimeheaders:
179 # let email parser handle this
179 # let email parser handle this
180 return mimesplit(stream, cur)
180 return mimesplit(stream, cur)
181 elif line.startswith('--- ') and inheader:
181 elif line.startswith('--- ') and inheader:
182 # No evil headers seen by diff start, split by hand
182 # No evil headers seen by diff start, split by hand
183 return headersplit(stream, cur)
183 return headersplit(stream, cur)
184 # Not enough info, keep reading
184 # Not enough info, keep reading
185
185
186 # if we are here, we have a very plain patch
186 # if we are here, we have a very plain patch
187 return remainder(cur)
187 return remainder(cur)
188
188
189 ## Some facility for extensible patch parsing:
189 ## Some facility for extensible patch parsing:
190 # list of pairs ("header to match", "data key")
190 # list of pairs ("header to match", "data key")
191 patchheadermap = [('Date', 'date'),
191 patchheadermap = [('Date', 'date'),
192 ('Branch', 'branch'),
192 ('Branch', 'branch'),
193 ('Node ID', 'nodeid'),
193 ('Node ID', 'nodeid'),
194 ]
194 ]
195
195
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
      - filename,
      - message,
      - user,
      - date,
      - branch,
      - node,
      - p1,
      - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
                        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        br'---[ \t].*?^\+\+\+[ \t]|'
                        br'\*\*\*[ \t].*?^---[ \t])',
                        re.MULTILINE | re.DOTALL)

    data = {}
    # the patch body is spooled to a temp file; on success its name is
    # returned under data['filename'] and the caller must unlink it
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, r'wb')
    try:
        msg = pycompat.emailparser().parse(fileobj)

        subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
        data['user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(map(encoding.strtolocal, h))
                                for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                # strip a leading "[PATCH n/m]"-style tag from the subject
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            # unfold continuation lines of a wrapped Subject: header
            subject = re.sub(br'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = pycompat.bytestr(part.get_content_type())
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                # this MIME part contains a patch; everything before the
                # first diff marker is treated as commit message material
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = stringio()
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        # '# <header> <value>' lines of the hg export header
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            # first non-'#' line ends the hg patch header
                            hgpatchheader = False
                    elif line == '---':
                        # conventional patch separator: ignore what follows
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                # plain-text part before any diff: append to the message
                message += '\n' + payload
    except: # re-raises
        # clean up the temp file on any failure, then propagate
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        # no patch found: nothing to keep on disk
        os.unlink(tmpname)
    return data
317
317
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """

    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # decode a git-style file mode into the (islink, isexec) pair
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        dup = patchmeta(self.path)
        dup.oldpath = self.oldpath
        dup.mode = self.mode
        dup.op = self.op
        dup.binary = self.binary
        return dup

    def _ispatchinga(self, afile):
        # the 'a' side of a hunk names the origin file (or /dev/null on add)
        if afile == '/dev/null':
            return self.op == 'ADD'
        source = self.oldpath or self.path
        return afile == 'a/' + source

    def _ispatchingb(self, bfile):
        # the 'b' side names the destination file (or /dev/null on delete)
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
363
363
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Scan the patch for git extended headers, building one patchmeta
    # per 'diff --git' section.
    gitpatches = []
    gp = None
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                gp = patchmeta(m.group(2))
        elif gp:
            if line.startswith('--- '):
                # start of hunk data: this section's metadata is complete
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
407
407
class linereader(object):
    """Wrap a file object, allowing lines to be pushed back onto the
    input stream so they are returned by the next readline() call."""

    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        # None is ignored so callers may unconditionally push a
        # possibly-absent lookahead line
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            return self.buf.pop(0)
        return self.fp.readline()

    def __iter__(self):
        # iterate until the underlying stream reports EOF ('')
        return iter(self.readline, '')
427
427
class abstractbackend(object):
    """Interface for objects that receive the results of applying a patch.

    Concrete subclasses implement file reads/writes/removal against some
    target (working directory, in-memory store, ...); this base class only
    stores the ui and raises NotImplementedError for every operation.
    """

    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected hunk lines for fname. failed is the number of
        hunks which could not be applied and total the total number of
        hunks for this file. Default implementation is a no-op.
        """

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError
461
461
class fsbackend(abstractbackend):
    """Patch backend operating on the filesystem through a vfs rooted
    at basedir."""

    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        if self.opener.islink(fname):
            # for symlinks the "data" is the link target
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as err:
            # a missing file simply isn't executable; anything else is fatal
            if err.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # content unchanged: only flags need updating
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
            return
        self.opener.write(fname, data)
        if isexec:
            self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        # save rejected hunks next to the target as <fname>.rej
        rejname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, rejname))
        rejfp = self.opener(rejname, 'w')
        rejfp.writelines(lines)
        rejfp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
510
510
class workingbackend(fsbackend):
    """fsbackend that additionally keeps the repository dirstate in sync
    with the files it patches."""

    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to patch files present on disk but unknown to the dirstate
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        wctx = self.repo[None]
        touched = set(self.changed)
        for source, dest in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, source, dest)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for path in self.removed:
                if path not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    touched.discard(path)
        if touched:
            scmutil.marktouched(self.repo, touched, self.similarity)
        return sorted(self.changed)
554
554
class filestore(object):
    """Accumulate patched file contents, keeping up to maxsize bytes in
    memory and spilling larger payloads to a temporary directory."""

    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        self.created = 0
        # default in-memory budget: 4 MiB
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            # still fits in memory (negative maxsize means unlimited)
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
            return
        # spill to disk
        if self.opener is None:
            tmproot = tempfile.mkdtemp(prefix='hg-patch-')
            self.opener = vfsmod.vfs(tmproot)
        # Avoid filename issues with these simple names
        diskname = '%d' % self.created
        self.opener.write(diskname, data)
        self.created += 1
        self.files[fname] = (diskname, mode, copied)

    def getfile(self, fname):
        try:
            return self.data[fname]
        except KeyError:
            pass
        if not self.opener or fname not in self.files:
            return None, None, None
        diskname, mode, copied = self.files[fname]
        return self.opener.read(diskname), mode, copied

    def close(self):
        if self.opener:
            shutil.rmtree(self.opener.base)
591
591
class repobackend(abstractbackend):
    """Patch backend that reads from a changectx and records results in
    an in-memory filestore instead of touching the working directory."""

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            filectx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flagstr = filectx.flags()
        return filectx.data(), ('l' in flagstr, 'x' in flagstr)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # flags-only change: carry the existing content over
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        return self.changed | self.removed
633
633
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# NOTE: raw string literals -- '\d', '\+' and '\*' in a plain literal are
# invalid escape sequences and raise a DeprecationWarning on Python 3.
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
# context-diff range line: '--- start,len ----' or '*** start,len ****'
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# supported end-of-line normalization modes for patched files
eolmodes = ['strict', 'crlf', 'lf', 'auto']
638
638
639 class patchfile(object):
639 class patchfile(object):
640 def __init__(self, ui, gp, backend, store, eolmode='strict'):
640 def __init__(self, ui, gp, backend, store, eolmode='strict'):
641 self.fname = gp.path
641 self.fname = gp.path
642 self.eolmode = eolmode
642 self.eolmode = eolmode
643 self.eol = None
643 self.eol = None
644 self.backend = backend
644 self.backend = backend
645 self.ui = ui
645 self.ui = ui
646 self.lines = []
646 self.lines = []
647 self.exists = False
647 self.exists = False
648 self.missing = True
648 self.missing = True
649 self.mode = gp.mode
649 self.mode = gp.mode
650 self.copysource = gp.oldpath
650 self.copysource = gp.oldpath
651 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
651 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
652 self.remove = gp.op == 'DELETE'
652 self.remove = gp.op == 'DELETE'
653 if self.copysource is None:
653 if self.copysource is None:
654 data, mode = backend.getfile(self.fname)
654 data, mode = backend.getfile(self.fname)
655 else:
655 else:
656 data, mode = store.getfile(self.copysource)[:2]
656 data, mode = store.getfile(self.copysource)[:2]
657 if data is not None:
657 if data is not None:
658 self.exists = self.copysource is None or backend.exists(self.fname)
658 self.exists = self.copysource is None or backend.exists(self.fname)
659 self.missing = False
659 self.missing = False
660 if data:
660 if data:
661 self.lines = mdiff.splitnewlines(data)
661 self.lines = mdiff.splitnewlines(data)
662 if self.mode is None:
662 if self.mode is None:
663 self.mode = mode
663 self.mode = mode
664 if self.lines:
664 if self.lines:
665 # Normalize line endings
665 # Normalize line endings
666 if self.lines[0].endswith('\r\n'):
666 if self.lines[0].endswith('\r\n'):
667 self.eol = '\r\n'
667 self.eol = '\r\n'
668 elif self.lines[0].endswith('\n'):
668 elif self.lines[0].endswith('\n'):
669 self.eol = '\n'
669 self.eol = '\n'
670 if eolmode != 'strict':
670 if eolmode != 'strict':
671 nlines = []
671 nlines = []
672 for l in self.lines:
672 for l in self.lines:
673 if l.endswith('\r\n'):
673 if l.endswith('\r\n'):
674 l = l[:-2] + '\n'
674 l = l[:-2] + '\n'
675 nlines.append(l)
675 nlines.append(l)
676 self.lines = nlines
676 self.lines = nlines
677 else:
677 else:
678 if self.create:
678 if self.create:
679 self.missing = False
679 self.missing = False
680 if self.mode is None:
680 if self.mode is None:
681 self.mode = (False, False)
681 self.mode = (False, False)
682 if self.missing:
682 if self.missing:
683 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
683 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
684 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
684 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
685 "current directory)\n"))
685 "current directory)\n"))
686
686
687 self.hash = {}
687 self.hash = {}
688 self.dirty = 0
688 self.dirty = 0
689 self.offset = 0
689 self.offset = 0
690 self.skew = 0
690 self.skew = 0
691 self.rej = []
691 self.rej = []
692 self.fileprinted = False
692 self.fileprinted = False
693 self.printfile(False)
693 self.printfile(False)
694 self.hunks = 0
694 self.hunks = 0
695
695
696 def writelines(self, fname, lines, mode):
696 def writelines(self, fname, lines, mode):
697 if self.eolmode == 'auto':
697 if self.eolmode == 'auto':
698 eol = self.eol
698 eol = self.eol
699 elif self.eolmode == 'crlf':
699 elif self.eolmode == 'crlf':
700 eol = '\r\n'
700 eol = '\r\n'
701 else:
701 else:
702 eol = '\n'
702 eol = '\n'
703
703
704 if self.eolmode != 'strict' and eol and eol != '\n':
704 if self.eolmode != 'strict' and eol and eol != '\n':
705 rawlines = []
705 rawlines = []
706 for l in lines:
706 for l in lines:
707 if l and l[-1] == '\n':
707 if l and l[-1] == '\n':
708 l = l[:-1] + eol
708 l = l[:-1] + eol
709 rawlines.append(l)
709 rawlines.append(l)
710 lines = rawlines
710 lines = rawlines
711
711
712 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
712 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
713
713
714 def printfile(self, warn):
714 def printfile(self, warn):
715 if self.fileprinted:
715 if self.fileprinted:
716 return
716 return
717 if warn or self.ui.verbose:
717 if warn or self.ui.verbose:
718 self.fileprinted = True
718 self.fileprinted = True
719 s = _("patching file %s\n") % self.fname
719 s = _("patching file %s\n") % self.fname
720 if warn:
720 if warn:
721 self.ui.warn(s)
721 self.ui.warn(s)
722 else:
722 else:
723 self.ui.note(s)
723 self.ui.note(s)
724
724
725
725
726 def findlines(self, l, linenum):
726 def findlines(self, l, linenum):
727 # looks through the hash and finds candidate lines. The
727 # looks through the hash and finds candidate lines. The
728 # result is a list of line numbers sorted based on distance
728 # result is a list of line numbers sorted based on distance
729 # from linenum
729 # from linenum
730
730
731 cand = self.hash.get(l, [])
731 cand = self.hash.get(l, [])
732 if len(cand) > 1:
732 if len(cand) > 1:
733 # resort our list of potentials forward then back.
733 # resort our list of potentials forward then back.
734 cand.sort(key=lambda x: abs(x - linenum))
734 cand.sort(key=lambda x: abs(x - linenum))
735 return cand
735 return cand
736
736
737 def write_rej(self):
737 def write_rej(self):
738 # our rejects are a little different from patch(1). This always
738 # our rejects are a little different from patch(1). This always
739 # creates rejects in the same form as the original patch. A file
739 # creates rejects in the same form as the original patch. A file
740 # header is inserted so that you can run the reject through patch again
740 # header is inserted so that you can run the reject through patch again
741 # without having to type the filename.
741 # without having to type the filename.
742 if not self.rej:
742 if not self.rej:
743 return
743 return
744 base = os.path.basename(self.fname)
744 base = os.path.basename(self.fname)
745 lines = ["--- %s\n+++ %s\n" % (base, base)]
745 lines = ["--- %s\n+++ %s\n" % (base, base)]
746 for x in self.rej:
746 for x in self.rej:
747 for l in x.hunk:
747 for l in x.hunk:
748 lines.append(l)
748 lines.append(l)
749 if l[-1:] != '\n':
749 if l[-1:] != '\n':
750 lines.append("\n\ No newline at end of file\n")
750 lines.append("\n\ No newline at end of file\n")
751 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
751 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
752
752
    def apply(self, h):
        """Apply hunk ``h`` to the in-memory file lines.

        Returns the fuzz level that was needed (0 for a clean apply)
        or -1 when the hunk could not be applied; rejected hunks are
        appended to ``self.rej`` in their original (non-normalized)
        form.
        """
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        # Target file is missing: nothing to patch, reject outright.
        if self.missing:
            self.rej.append(h)
            return -1

        # Refuse to (re)create a file that already exists.
        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        # Binary hunks replace (or remove) the whole content; there is
        # no line-level matching to do.
        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        # Keep the original hunk so a failure rejects the un-normalized
        # version.
        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        # Index every line so findlines() can locate candidate anchors.
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        # Try increasing amounts of fuzz (dropped context lines); for
        # each level, first dropping only top context, then both ends.
        for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        # Remember the displacement so subsequent hunks
                        # start their search at the shifted position.
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                         (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1
849
849
850 def close(self):
850 def close(self):
851 if self.dirty:
851 if self.dirty:
852 self.writelines(self.fname, self.lines, self.mode)
852 self.writelines(self.fname, self.lines, self.mode)
853 self.write_rej()
853 self.write_rej()
854 return len(self.rej)
854 return len(self.rej)
855
855
class header(object):
    """The header portion of a patch for one file (the ``diff ...`` lines).

    Holds the raw header lines plus the hunks parsed for that file, and
    answers questions about the file's nature (binary, new, special).
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        """True when an ``index `` line marks this as binary content."""
        for line in self.header:
            if line.startswith('index '):
                return True
        return False

    def pretty(self, fp):
        """Write a human-oriented summary of this header to ``fp``."""
        for line in self.header:
            if line.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(line):
                fp.write(line)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if line.startswith('---'):
                changed = sum([max(hk.added, hk.removed) for hk in self.hunks])
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks), changed))
                break
            fp.write(line)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        """True when the whole file must be taken or left as one unit."""
        return any(self.allhunks_re.match(line) for line in self.header)

    def files(self):
        """Extract the file name(s) touched, from the first header line."""
        m = self.diffgit_re.match(self.header[0])
        if not m:
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = m.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        return any(self.newfile_re.match(line) for line in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the
        # hunk level; e.g. a deleted file is special: the user takes the
        # deletion wholesale or not at all, never a part of it.
        # A newly added file is special only while it is empty -- once it
        # has content the user may want to edit that content.
        if self.isnewfile() and len(self.header) == 2:
            return True
        return any(self.special_re.match(line) for line in self.header)
927
927
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(self, header, fromline, toline, proc, before, hunk, after,
                 maxcontext=None):
        def trimcontext(lines, reverse=False):
            # Cap the context at maxcontext lines and report how many
            # were dropped, so the start lines can be adjusted.
            if maxcontext is None:
                return 0, lines
            excess = len(lines) - maxcontext
            if excess <= 0:
                return 0, lines
            if reverse:
                return excess, lines[excess:]
            return excess, lines[:maxcontext]

        self.header = header
        trimmed, self.before = trimcontext(before, True)
        self.fromline = fromline + trimmed
        self.toline = toline + trimmed
        _unused, self.after = trimcontext(after, False)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False
        return (self.hunk == v.hunk
                and self.proc == v.proc
                and self.fromline == v.fromline
                and self.header.files() == v.header.files())

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = sum(1 for line in hunk if line.startswith('+'))
        rem = sum(1 for line in hunk if line.startswith('-'))
        return add, rem

    def reversehunk(self):
        """return another recordhunk which is the reverse of the hunk

        If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
        that, swap fromline/toline and +/- signs while keep other things
        unchanged.
        """
        flip = {'+': '-', '-': '+', '\\': '\\'}
        flipped = ['%s%s' % (flip[line[0:1]], line[1:]) for line in self.hunk]
        return recordhunk(self.header, self.toline, self.fromline, self.proc,
                          self.before, flipped, self.after)

    def write(self, fp):
        delta = len(self.before) + len(self.after)
        # A trailing "no newline" marker is annotation, not a line.
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
1006
1006
def getmessages():
    """Return the prompt/help message table for interactive filtering.

    'multiple' and 'single' map an operation name ('apply', 'discard',
    'record') to the per-hunk question shown when there are several
    hunks or exactly one; 'help' maps it to the promptchoice string
    enumerating every accepted response ('$$'-separated, '&' marks the
    response key).
    """
    return {
        'multiple': {
            'apply': _("apply change %d/%d to '%s'?"),
            'discard': _("discard change %d/%d to '%s'?"),
            'record': _("record change %d/%d to '%s'?"),
        },
        'single': {
            'apply': _("apply this change to '%s'?"),
            'discard': _("discard this change to '%s'?"),
            'record': _("record this change to '%s'?"),
        },
        'help': {
            'apply': _('[Ynesfdaq?]'
                       '$$ &Yes, apply this change'
                       '$$ &No, skip this change'
                       '$$ &Edit this change manually'
                       '$$ &Skip remaining changes to this file'
                       '$$ Apply remaining changes to this &file'
                       '$$ &Done, skip remaining changes and files'
                       '$$ Apply &all changes to all remaining files'
                       '$$ &Quit, applying no changes'
                       '$$ &? (display help)'),
            'discard': _('[Ynesfdaq?]'
                         '$$ &Yes, discard this change'
                         '$$ &No, skip this change'
                         '$$ &Edit this change manually'
                         '$$ &Skip remaining changes to this file'
                         '$$ Discard remaining changes to this &file'
                         '$$ &Done, skip remaining changes and files'
                         '$$ Discard &all changes to all remaining files'
                         '$$ &Quit, discarding no changes'
                         '$$ &? (display help)'),
            'record': _('[Ynesfdaq?]'
                        '$$ &Yes, record this change'
                        '$$ &No, skip this change'
                        '$$ &Edit this change manually'
                        '$$ &Skip remaining changes to this file'
                        '$$ Record remaining changes to this &file'
                        '$$ &Done, skip remaining changes and files'
                        '$$ Record &all changes to all remaining files'
                        '$$ &Quit, recording no changes'
                        '$$ &? (display help)'),
        }
    }
1052
1052
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks"""
    messages = getmessages()

    if operation is None:
        operation = 'record'

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # A previous "all"/"file" answer short-circuits the prompt.
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = messages['help'][operation]
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                # Editing only makes sense for a concrete, textual hunk.
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff")
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()},
                                    blockedtag='filterpatch')
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn, r'rb')
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        line = util.fromnativeeol(line)
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {} # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    # pos is advanced past each header's hunks up front; the idx
    # computation below recovers the 1-based global hunk position.
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        # fixoffset tracks the line drift caused by skipped hunks so
        # later accepted hunks get corrected target lines.
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
            msg = (_('examine changes to %s?') %
                   _(' and ').join("'%s'" % f for f in h.files()))
            r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
            if not r:
                continue
        applied[h.filename()] = [h]
        if h.allhunks():
            # All-or-nothing file (e.g. binary): take every hunk.
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages['single'][operation] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages['multiple'][operation] % (idx, total,
                                                         chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                                                      skipall, msg, chunk)
            if r:
                if fixoffset:
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                # The user edited the hunk: apply the edited version(s)
                # instead of the original chunk.
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    # Keep only files where something beyond the bare header was chosen,
    # or whose header is special (e.g. deletion). NOTE(review): the
    # second tuple element appears to be reserved -- always empty here.
    return (sum([h for h in applied.itervalues()
                 if h[0].special() or len(h) > 1], []), {})
1204 class hunk(object):
1204 class hunk(object):
1205 def __init__(self, desc, num, lr, context):
1205 def __init__(self, desc, num, lr, context):
1206 self.number = num
1206 self.number = num
1207 self.desc = desc
1207 self.desc = desc
1208 self.hunk = [desc]
1208 self.hunk = [desc]
1209 self.a = []
1209 self.a = []
1210 self.b = []
1210 self.b = []
1211 self.starta = self.lena = None
1211 self.starta = self.lena = None
1212 self.startb = self.lenb = None
1212 self.startb = self.lenb = None
1213 if lr is not None:
1213 if lr is not None:
1214 if context:
1214 if context:
1215 self.read_context_hunk(lr)
1215 self.read_context_hunk(lr)
1216 else:
1216 else:
1217 self.read_unified_hunk(lr)
1217 self.read_unified_hunk(lr)
1218
1218
1219 def getnormalized(self):
1219 def getnormalized(self):
1220 """Return a copy with line endings normalized to LF."""
1220 """Return a copy with line endings normalized to LF."""
1221
1221
1222 def normalize(lines):
1222 def normalize(lines):
1223 nlines = []
1223 nlines = []
1224 for line in lines:
1224 for line in lines:
1225 if line.endswith('\r\n'):
1225 if line.endswith('\r\n'):
1226 line = line[:-2] + '\n'
1226 line = line[:-2] + '\n'
1227 nlines.append(line)
1227 nlines.append(line)
1228 return nlines
1228 return nlines
1229
1229
1230 # Dummy object, it is rebuilt manually
1230 # Dummy object, it is rebuilt manually
1231 nh = hunk(self.desc, self.number, None, None)
1231 nh = hunk(self.desc, self.number, None, None)
1232 nh.number = self.number
1232 nh.number = self.number
1233 nh.desc = self.desc
1233 nh.desc = self.desc
1234 nh.hunk = self.hunk
1234 nh.hunk = self.hunk
1235 nh.a = normalize(self.a)
1235 nh.a = normalize(self.a)
1236 nh.b = normalize(self.b)
1236 nh.b = normalize(self.b)
1237 nh.starta = self.starta
1237 nh.starta = self.starta
1238 nh.startb = self.startb
1238 nh.startb = self.startb
1239 nh.lena = self.lena
1239 nh.lena = self.lena
1240 nh.lenb = self.lenb
1240 nh.lenb = self.lenb
1241 return nh
1241 return nh
1242
1242
    def read_unified_hunk(self, lr):
        """Parse a unified-format hunk body from line reader ``lr``.

        ``self.desc`` must hold the '@@ -a,la +b,lb @@' line; a missing
        length defaults to 1. Raises PatchError on a malformed header.
        """
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)
1269
1269
1270 def read_context_hunk(self, lr):
1270 def read_context_hunk(self, lr):
1271 self.desc = lr.readline()
1271 self.desc = lr.readline()
1272 m = contextdesc.match(self.desc)
1272 m = contextdesc.match(self.desc)
1273 if not m:
1273 if not m:
1274 raise PatchError(_("bad hunk #%d") % self.number)
1274 raise PatchError(_("bad hunk #%d") % self.number)
1275 self.starta, aend = m.groups()
1275 self.starta, aend = m.groups()
1276 self.starta = int(self.starta)
1276 self.starta = int(self.starta)
1277 if aend is None:
1277 if aend is None:
1278 aend = self.starta
1278 aend = self.starta
1279 self.lena = int(aend) - self.starta
1279 self.lena = int(aend) - self.starta
1280 if self.starta:
1280 if self.starta:
1281 self.lena += 1
1281 self.lena += 1
1282 for x in xrange(self.lena):
1282 for x in xrange(self.lena):
1283 l = lr.readline()
1283 l = lr.readline()
1284 if l.startswith('---'):
1284 if l.startswith('---'):
1285 # lines addition, old block is empty
1285 # lines addition, old block is empty
1286 lr.push(l)
1286 lr.push(l)
1287 break
1287 break
1288 s = l[2:]
1288 s = l[2:]
1289 if l.startswith('- ') or l.startswith('! '):
1289 if l.startswith('- ') or l.startswith('! '):
1290 u = '-' + s
1290 u = '-' + s
1291 elif l.startswith(' '):
1291 elif l.startswith(' '):
1292 u = ' ' + s
1292 u = ' ' + s
1293 else:
1293 else:
1294 raise PatchError(_("bad hunk #%d old text line %d") %
1294 raise PatchError(_("bad hunk #%d old text line %d") %
1295 (self.number, x))
1295 (self.number, x))
1296 self.a.append(u)
1296 self.a.append(u)
1297 self.hunk.append(u)
1297 self.hunk.append(u)
1298
1298
1299 l = lr.readline()
1299 l = lr.readline()
1300 if l.startswith('\ '):
1300 if l.startswith('\ '):
1301 s = self.a[-1][:-1]
1301 s = self.a[-1][:-1]
1302 self.a[-1] = s
1302 self.a[-1] = s
1303 self.hunk[-1] = s
1303 self.hunk[-1] = s
1304 l = lr.readline()
1304 l = lr.readline()
1305 m = contextdesc.match(l)
1305 m = contextdesc.match(l)
1306 if not m:
1306 if not m:
1307 raise PatchError(_("bad hunk #%d") % self.number)
1307 raise PatchError(_("bad hunk #%d") % self.number)
1308 self.startb, bend = m.groups()
1308 self.startb, bend = m.groups()
1309 self.startb = int(self.startb)
1309 self.startb = int(self.startb)
1310 if bend is None:
1310 if bend is None:
1311 bend = self.startb
1311 bend = self.startb
1312 self.lenb = int(bend) - self.startb
1312 self.lenb = int(bend) - self.startb
1313 if self.startb:
1313 if self.startb:
1314 self.lenb += 1
1314 self.lenb += 1
1315 hunki = 1
1315 hunki = 1
1316 for x in xrange(self.lenb):
1316 for x in xrange(self.lenb):
1317 l = lr.readline()
1317 l = lr.readline()
1318 if l.startswith('\ '):
1318 if l.startswith('\ '):
1319 # XXX: the only way to hit this is with an invalid line range.
1319 # XXX: the only way to hit this is with an invalid line range.
1320 # The no-eol marker is not counted in the line range, but I
1320 # The no-eol marker is not counted in the line range, but I
1321 # guess there are diff(1) out there which behave differently.
1321 # guess there are diff(1) out there which behave differently.
1322 s = self.b[-1][:-1]
1322 s = self.b[-1][:-1]
1323 self.b[-1] = s
1323 self.b[-1] = s
1324 self.hunk[hunki - 1] = s
1324 self.hunk[hunki - 1] = s
1325 continue
1325 continue
1326 if not l:
1326 if not l:
1327 # line deletions, new block is empty and we hit EOF
1327 # line deletions, new block is empty and we hit EOF
1328 lr.push(l)
1328 lr.push(l)
1329 break
1329 break
1330 s = l[2:]
1330 s = l[2:]
1331 if l.startswith('+ ') or l.startswith('! '):
1331 if l.startswith('+ ') or l.startswith('! '):
1332 u = '+' + s
1332 u = '+' + s
1333 elif l.startswith(' '):
1333 elif l.startswith(' '):
1334 u = ' ' + s
1334 u = ' ' + s
1335 elif len(self.b) == 0:
1335 elif len(self.b) == 0:
1336 # line deletions, new block is empty
1336 # line deletions, new block is empty
1337 lr.push(l)
1337 lr.push(l)
1338 break
1338 break
1339 else:
1339 else:
1340 raise PatchError(_("bad hunk #%d old text line %d") %
1340 raise PatchError(_("bad hunk #%d old text line %d") %
1341 (self.number, x))
1341 (self.number, x))
1342 self.b.append(s)
1342 self.b.append(s)
1343 while True:
1343 while True:
1344 if hunki >= len(self.hunk):
1344 if hunki >= len(self.hunk):
1345 h = ""
1345 h = ""
1346 else:
1346 else:
1347 h = self.hunk[hunki]
1347 h = self.hunk[hunki]
1348 hunki += 1
1348 hunki += 1
1349 if h == u:
1349 if h == u:
1350 break
1350 break
1351 elif h.startswith('-'):
1351 elif h.startswith('-'):
1352 continue
1352 continue
1353 else:
1353 else:
1354 self.hunk.insert(hunki - 1, u)
1354 self.hunk.insert(hunki - 1, u)
1355 break
1355 break
1356
1356
1357 if not self.a:
1357 if not self.a:
1358 # this happens when lines were only added to the hunk
1358 # this happens when lines were only added to the hunk
1359 for x in self.hunk:
1359 for x in self.hunk:
1360 if x.startswith('-') or x.startswith(' '):
1360 if x.startswith('-') or x.startswith(' '):
1361 self.a.append(x)
1361 self.a.append(x)
1362 if not self.b:
1362 if not self.b:
1363 # this happens when lines were only deleted from the hunk
1363 # this happens when lines were only deleted from the hunk
1364 for x in self.hunk:
1364 for x in self.hunk:
1365 if x.startswith('+') or x.startswith(' '):
1365 if x.startswith('+') or x.startswith(' '):
1366 self.b.append(x[1:])
1366 self.b.append(x[1:])
1367 # @@ -start,len +start,len @@
1367 # @@ -start,len +start,len @@
1368 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1368 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1369 self.startb, self.lenb)
1369 self.startb, self.lenb)
1370 self.hunk[0] = self.desc
1370 self.hunk[0] = self.desc
1371 self._fixnewline(lr)
1371 self._fixnewline(lr)
1372
1372
1373 def _fixnewline(self, lr):
1373 def _fixnewline(self, lr):
1374 l = lr.readline()
1374 l = lr.readline()
1375 if l.startswith('\ '):
1375 if l.startswith('\ '):
1376 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1376 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1377 else:
1377 else:
1378 lr.push(l)
1378 lr.push(l)
1379
1379
1380 def complete(self):
1380 def complete(self):
1381 return len(self.a) == self.lena and len(self.b) == self.lenb
1381 return len(self.a) == self.lena and len(self.b) == self.lenb
1382
1382
1383 def _fuzzit(self, old, new, fuzz, toponly):
1383 def _fuzzit(self, old, new, fuzz, toponly):
1384 # this removes context lines from the top and bottom of list 'l'. It
1384 # this removes context lines from the top and bottom of list 'l'. It
1385 # checks the hunk to make sure only context lines are removed, and then
1385 # checks the hunk to make sure only context lines are removed, and then
1386 # returns a new shortened list of lines.
1386 # returns a new shortened list of lines.
1387 fuzz = min(fuzz, len(old))
1387 fuzz = min(fuzz, len(old))
1388 if fuzz:
1388 if fuzz:
1389 top = 0
1389 top = 0
1390 bot = 0
1390 bot = 0
1391 hlen = len(self.hunk)
1391 hlen = len(self.hunk)
1392 for x in xrange(hlen - 1):
1392 for x in xrange(hlen - 1):
1393 # the hunk starts with the @@ line, so use x+1
1393 # the hunk starts with the @@ line, so use x+1
1394 if self.hunk[x + 1][0] == ' ':
1394 if self.hunk[x + 1].startswith(' '):
1395 top += 1
1395 top += 1
1396 else:
1396 else:
1397 break
1397 break
1398 if not toponly:
1398 if not toponly:
1399 for x in xrange(hlen - 1):
1399 for x in xrange(hlen - 1):
1400 if self.hunk[hlen - bot - 1][0] == ' ':
1400 if self.hunk[hlen - bot - 1].startswith(' '):
1401 bot += 1
1401 bot += 1
1402 else:
1402 else:
1403 break
1403 break
1404
1404
1405 bot = min(fuzz, bot)
1405 bot = min(fuzz, bot)
1406 top = min(fuzz, top)
1406 top = min(fuzz, top)
1407 return old[top:len(old) - bot], new[top:len(new) - bot], top
1407 return old[top:len(old) - bot], new[top:len(new) - bot], top
1408 return old, new, 0
1408 return old, new, 0
1409
1409
1410 def fuzzit(self, fuzz, toponly):
1410 def fuzzit(self, fuzz, toponly):
1411 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1411 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1412 oldstart = self.starta + top
1412 oldstart = self.starta + top
1413 newstart = self.startb + top
1413 newstart = self.startb + top
1414 # zero length hunk ranges already have their start decremented
1414 # zero length hunk ranges already have their start decremented
1415 if self.lena and oldstart > 0:
1415 if self.lena and oldstart > 0:
1416 oldstart -= 1
1416 oldstart -= 1
1417 if self.lenb and newstart > 0:
1417 if self.lenb and newstart > 0:
1418 newstart -= 1
1418 newstart -= 1
1419 return old, oldstart, new, newstart
1419 return old, oldstart, new, newstart
1420
1420
1421 class binhunk(object):
1421 class binhunk(object):
1422 'A binary patch file.'
1422 'A binary patch file.'
1423 def __init__(self, lr, fname):
1423 def __init__(self, lr, fname):
1424 self.text = None
1424 self.text = None
1425 self.delta = False
1425 self.delta = False
1426 self.hunk = ['GIT binary patch\n']
1426 self.hunk = ['GIT binary patch\n']
1427 self._fname = fname
1427 self._fname = fname
1428 self._read(lr)
1428 self._read(lr)
1429
1429
1430 def complete(self):
1430 def complete(self):
1431 return self.text is not None
1431 return self.text is not None
1432
1432
1433 def new(self, lines):
1433 def new(self, lines):
1434 if self.delta:
1434 if self.delta:
1435 return [applybindelta(self.text, ''.join(lines))]
1435 return [applybindelta(self.text, ''.join(lines))]
1436 return [self.text]
1436 return [self.text]
1437
1437
1438 def _read(self, lr):
1438 def _read(self, lr):
1439 def getline(lr, hunk):
1439 def getline(lr, hunk):
1440 l = lr.readline()
1440 l = lr.readline()
1441 hunk.append(l)
1441 hunk.append(l)
1442 return l.rstrip('\r\n')
1442 return l.rstrip('\r\n')
1443
1443
1444 size = 0
1444 size = 0
1445 while True:
1445 while True:
1446 line = getline(lr, self.hunk)
1446 line = getline(lr, self.hunk)
1447 if not line:
1447 if not line:
1448 raise PatchError(_('could not extract "%s" binary data')
1448 raise PatchError(_('could not extract "%s" binary data')
1449 % self._fname)
1449 % self._fname)
1450 if line.startswith('literal '):
1450 if line.startswith('literal '):
1451 size = int(line[8:].rstrip())
1451 size = int(line[8:].rstrip())
1452 break
1452 break
1453 if line.startswith('delta '):
1453 if line.startswith('delta '):
1454 size = int(line[6:].rstrip())
1454 size = int(line[6:].rstrip())
1455 self.delta = True
1455 self.delta = True
1456 break
1456 break
1457 dec = []
1457 dec = []
1458 line = getline(lr, self.hunk)
1458 line = getline(lr, self.hunk)
1459 while len(line) > 1:
1459 while len(line) > 1:
1460 l = line[0:1]
1460 l = line[0:1]
1461 if l <= 'Z' and l >= 'A':
1461 if l <= 'Z' and l >= 'A':
1462 l = ord(l) - ord('A') + 1
1462 l = ord(l) - ord('A') + 1
1463 else:
1463 else:
1464 l = ord(l) - ord('a') + 27
1464 l = ord(l) - ord('a') + 27
1465 try:
1465 try:
1466 dec.append(util.b85decode(line[1:])[:l])
1466 dec.append(util.b85decode(line[1:])[:l])
1467 except ValueError as e:
1467 except ValueError as e:
1468 raise PatchError(_('could not decode "%s" binary patch: %s')
1468 raise PatchError(_('could not decode "%s" binary patch: %s')
1469 % (self._fname, stringutil.forcebytestr(e)))
1469 % (self._fname, stringutil.forcebytestr(e)))
1470 line = getline(lr, self.hunk)
1470 line = getline(lr, self.hunk)
1471 text = zlib.decompress(''.join(dec))
1471 text = zlib.decompress(''.join(dec))
1472 if len(text) != size:
1472 if len(text) != size:
1473 raise PatchError(_('"%s" length is %d bytes, should be %d')
1473 raise PatchError(_('"%s" length is %d bytes, should be %d')
1474 % (self._fname, len(text), size))
1474 % (self._fname, len(text), size))
1475 self.text = text
1475 self.text = text
1476
1476
1477 def parsefilename(str):
1477 def parsefilename(str):
1478 # --- filename \t|space stuff
1478 # --- filename \t|space stuff
1479 s = str[4:].rstrip('\r\n')
1479 s = str[4:].rstrip('\r\n')
1480 i = s.find('\t')
1480 i = s.find('\t')
1481 if i < 0:
1481 if i < 0:
1482 i = s.find(' ')
1482 i = s.find(' ')
1483 if i < 0:
1483 if i < 0:
1484 return s
1484 return s
1485 return s[:i]
1485 return s[:i]
1486
1486
1487 def reversehunks(hunks):
1487 def reversehunks(hunks):
1488 '''reverse the signs in the hunks given as argument
1488 '''reverse the signs in the hunks given as argument
1489
1489
1490 This function operates on hunks coming out of patch.filterpatch, that is
1490 This function operates on hunks coming out of patch.filterpatch, that is
1491 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1491 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1492
1492
1493 >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
1493 >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
1494 ... --- a/folder1/g
1494 ... --- a/folder1/g
1495 ... +++ b/folder1/g
1495 ... +++ b/folder1/g
1496 ... @@ -1,7 +1,7 @@
1496 ... @@ -1,7 +1,7 @@
1497 ... +firstline
1497 ... +firstline
1498 ... c
1498 ... c
1499 ... 1
1499 ... 1
1500 ... 2
1500 ... 2
1501 ... + 3
1501 ... + 3
1502 ... -4
1502 ... -4
1503 ... 5
1503 ... 5
1504 ... d
1504 ... d
1505 ... +lastline"""
1505 ... +lastline"""
1506 >>> hunks = parsepatch([rawpatch])
1506 >>> hunks = parsepatch([rawpatch])
1507 >>> hunkscomingfromfilterpatch = []
1507 >>> hunkscomingfromfilterpatch = []
1508 >>> for h in hunks:
1508 >>> for h in hunks:
1509 ... hunkscomingfromfilterpatch.append(h)
1509 ... hunkscomingfromfilterpatch.append(h)
1510 ... hunkscomingfromfilterpatch.extend(h.hunks)
1510 ... hunkscomingfromfilterpatch.extend(h.hunks)
1511
1511
1512 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1512 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1513 >>> from . import util
1513 >>> from . import util
1514 >>> fp = util.stringio()
1514 >>> fp = util.stringio()
1515 >>> for c in reversedhunks:
1515 >>> for c in reversedhunks:
1516 ... c.write(fp)
1516 ... c.write(fp)
1517 >>> fp.seek(0) or None
1517 >>> fp.seek(0) or None
1518 >>> reversedpatch = fp.read()
1518 >>> reversedpatch = fp.read()
1519 >>> print(pycompat.sysstr(reversedpatch))
1519 >>> print(pycompat.sysstr(reversedpatch))
1520 diff --git a/folder1/g b/folder1/g
1520 diff --git a/folder1/g b/folder1/g
1521 --- a/folder1/g
1521 --- a/folder1/g
1522 +++ b/folder1/g
1522 +++ b/folder1/g
1523 @@ -1,4 +1,3 @@
1523 @@ -1,4 +1,3 @@
1524 -firstline
1524 -firstline
1525 c
1525 c
1526 1
1526 1
1527 2
1527 2
1528 @@ -2,6 +1,6 @@
1528 @@ -2,6 +1,6 @@
1529 c
1529 c
1530 1
1530 1
1531 2
1531 2
1532 - 3
1532 - 3
1533 +4
1533 +4
1534 5
1534 5
1535 d
1535 d
1536 @@ -6,3 +5,2 @@
1536 @@ -6,3 +5,2 @@
1537 5
1537 5
1538 d
1538 d
1539 -lastline
1539 -lastline
1540
1540
1541 '''
1541 '''
1542
1542
1543 newhunks = []
1543 newhunks = []
1544 for c in hunks:
1544 for c in hunks:
1545 if util.safehasattr(c, 'reversehunk'):
1545 if util.safehasattr(c, 'reversehunk'):
1546 c = c.reversehunk()
1546 c = c.reversehunk()
1547 newhunks.append(c)
1547 newhunks.append(c)
1548 return newhunks
1548 return newhunks
1549
1549
1550 def parsepatch(originalchunks, maxcontext=None):
1550 def parsepatch(originalchunks, maxcontext=None):
1551 """patch -> [] of headers -> [] of hunks
1551 """patch -> [] of headers -> [] of hunks
1552
1552
1553 If maxcontext is not None, trim context lines if necessary.
1553 If maxcontext is not None, trim context lines if necessary.
1554
1554
1555 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1555 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1556 ... --- a/folder1/g
1556 ... --- a/folder1/g
1557 ... +++ b/folder1/g
1557 ... +++ b/folder1/g
1558 ... @@ -1,8 +1,10 @@
1558 ... @@ -1,8 +1,10 @@
1559 ... 1
1559 ... 1
1560 ... 2
1560 ... 2
1561 ... -3
1561 ... -3
1562 ... 4
1562 ... 4
1563 ... 5
1563 ... 5
1564 ... 6
1564 ... 6
1565 ... +6.1
1565 ... +6.1
1566 ... +6.2
1566 ... +6.2
1567 ... 7
1567 ... 7
1568 ... 8
1568 ... 8
1569 ... +9'''
1569 ... +9'''
1570 >>> out = util.stringio()
1570 >>> out = util.stringio()
1571 >>> headers = parsepatch([rawpatch], maxcontext=1)
1571 >>> headers = parsepatch([rawpatch], maxcontext=1)
1572 >>> for header in headers:
1572 >>> for header in headers:
1573 ... header.write(out)
1573 ... header.write(out)
1574 ... for hunk in header.hunks:
1574 ... for hunk in header.hunks:
1575 ... hunk.write(out)
1575 ... hunk.write(out)
1576 >>> print(pycompat.sysstr(out.getvalue()))
1576 >>> print(pycompat.sysstr(out.getvalue()))
1577 diff --git a/folder1/g b/folder1/g
1577 diff --git a/folder1/g b/folder1/g
1578 --- a/folder1/g
1578 --- a/folder1/g
1579 +++ b/folder1/g
1579 +++ b/folder1/g
1580 @@ -2,3 +2,2 @@
1580 @@ -2,3 +2,2 @@
1581 2
1581 2
1582 -3
1582 -3
1583 4
1583 4
1584 @@ -6,2 +5,4 @@
1584 @@ -6,2 +5,4 @@
1585 6
1585 6
1586 +6.1
1586 +6.1
1587 +6.2
1587 +6.2
1588 7
1588 7
1589 @@ -8,1 +9,2 @@
1589 @@ -8,1 +9,2 @@
1590 8
1590 8
1591 +9
1591 +9
1592 """
1592 """
1593 class parser(object):
1593 class parser(object):
1594 """patch parsing state machine"""
1594 """patch parsing state machine"""
1595 def __init__(self):
1595 def __init__(self):
1596 self.fromline = 0
1596 self.fromline = 0
1597 self.toline = 0
1597 self.toline = 0
1598 self.proc = ''
1598 self.proc = ''
1599 self.header = None
1599 self.header = None
1600 self.context = []
1600 self.context = []
1601 self.before = []
1601 self.before = []
1602 self.hunk = []
1602 self.hunk = []
1603 self.headers = []
1603 self.headers = []
1604
1604
1605 def addrange(self, limits):
1605 def addrange(self, limits):
1606 fromstart, fromend, tostart, toend, proc = limits
1606 fromstart, fromend, tostart, toend, proc = limits
1607 self.fromline = int(fromstart)
1607 self.fromline = int(fromstart)
1608 self.toline = int(tostart)
1608 self.toline = int(tostart)
1609 self.proc = proc
1609 self.proc = proc
1610
1610
1611 def addcontext(self, context):
1611 def addcontext(self, context):
1612 if self.hunk:
1612 if self.hunk:
1613 h = recordhunk(self.header, self.fromline, self.toline,
1613 h = recordhunk(self.header, self.fromline, self.toline,
1614 self.proc, self.before, self.hunk, context, maxcontext)
1614 self.proc, self.before, self.hunk, context, maxcontext)
1615 self.header.hunks.append(h)
1615 self.header.hunks.append(h)
1616 self.fromline += len(self.before) + h.removed
1616 self.fromline += len(self.before) + h.removed
1617 self.toline += len(self.before) + h.added
1617 self.toline += len(self.before) + h.added
1618 self.before = []
1618 self.before = []
1619 self.hunk = []
1619 self.hunk = []
1620 self.context = context
1620 self.context = context
1621
1621
1622 def addhunk(self, hunk):
1622 def addhunk(self, hunk):
1623 if self.context:
1623 if self.context:
1624 self.before = self.context
1624 self.before = self.context
1625 self.context = []
1625 self.context = []
1626 self.hunk = hunk
1626 self.hunk = hunk
1627
1627
1628 def newfile(self, hdr):
1628 def newfile(self, hdr):
1629 self.addcontext([])
1629 self.addcontext([])
1630 h = header(hdr)
1630 h = header(hdr)
1631 self.headers.append(h)
1631 self.headers.append(h)
1632 self.header = h
1632 self.header = h
1633
1633
1634 def addother(self, line):
1634 def addother(self, line):
1635 pass # 'other' lines are ignored
1635 pass # 'other' lines are ignored
1636
1636
1637 def finished(self):
1637 def finished(self):
1638 self.addcontext([])
1638 self.addcontext([])
1639 return self.headers
1639 return self.headers
1640
1640
1641 transitions = {
1641 transitions = {
1642 'file': {'context': addcontext,
1642 'file': {'context': addcontext,
1643 'file': newfile,
1643 'file': newfile,
1644 'hunk': addhunk,
1644 'hunk': addhunk,
1645 'range': addrange},
1645 'range': addrange},
1646 'context': {'file': newfile,
1646 'context': {'file': newfile,
1647 'hunk': addhunk,
1647 'hunk': addhunk,
1648 'range': addrange,
1648 'range': addrange,
1649 'other': addother},
1649 'other': addother},
1650 'hunk': {'context': addcontext,
1650 'hunk': {'context': addcontext,
1651 'file': newfile,
1651 'file': newfile,
1652 'range': addrange},
1652 'range': addrange},
1653 'range': {'context': addcontext,
1653 'range': {'context': addcontext,
1654 'hunk': addhunk},
1654 'hunk': addhunk},
1655 'other': {'other': addother},
1655 'other': {'other': addother},
1656 }
1656 }
1657
1657
1658 p = parser()
1658 p = parser()
1659 fp = stringio()
1659 fp = stringio()
1660 fp.write(''.join(originalchunks))
1660 fp.write(''.join(originalchunks))
1661 fp.seek(0)
1661 fp.seek(0)
1662
1662
1663 state = 'context'
1663 state = 'context'
1664 for newstate, data in scanpatch(fp):
1664 for newstate, data in scanpatch(fp):
1665 try:
1665 try:
1666 p.transitions[state][newstate](p, data)
1666 p.transitions[state][newstate](p, data)
1667 except KeyError:
1667 except KeyError:
1668 raise PatchError('unhandled transition: %s -> %s' %
1668 raise PatchError('unhandled transition: %s -> %s' %
1669 (state, newstate))
1669 (state, newstate))
1670 state = newstate
1670 state = newstate
1671 del fp
1671 del fp
1672 return p.finished()
1672 return p.finished()
1673
1673
1674 def pathtransform(path, strip, prefix):
1674 def pathtransform(path, strip, prefix):
1675 '''turn a path from a patch into a path suitable for the repository
1675 '''turn a path from a patch into a path suitable for the repository
1676
1676
1677 prefix, if not empty, is expected to be normalized with a / at the end.
1677 prefix, if not empty, is expected to be normalized with a / at the end.
1678
1678
1679 Returns (stripped components, path in repository).
1679 Returns (stripped components, path in repository).
1680
1680
1681 >>> pathtransform(b'a/b/c', 0, b'')
1681 >>> pathtransform(b'a/b/c', 0, b'')
1682 ('', 'a/b/c')
1682 ('', 'a/b/c')
1683 >>> pathtransform(b' a/b/c ', 0, b'')
1683 >>> pathtransform(b' a/b/c ', 0, b'')
1684 ('', ' a/b/c')
1684 ('', ' a/b/c')
1685 >>> pathtransform(b' a/b/c ', 2, b'')
1685 >>> pathtransform(b' a/b/c ', 2, b'')
1686 ('a/b/', 'c')
1686 ('a/b/', 'c')
1687 >>> pathtransform(b'a/b/c', 0, b'd/e/')
1687 >>> pathtransform(b'a/b/c', 0, b'd/e/')
1688 ('', 'd/e/a/b/c')
1688 ('', 'd/e/a/b/c')
1689 >>> pathtransform(b' a//b/c ', 2, b'd/e/')
1689 >>> pathtransform(b' a//b/c ', 2, b'd/e/')
1690 ('a//b/', 'd/e/c')
1690 ('a//b/', 'd/e/c')
1691 >>> pathtransform(b'a/b/c', 3, b'')
1691 >>> pathtransform(b'a/b/c', 3, b'')
1692 Traceback (most recent call last):
1692 Traceback (most recent call last):
1693 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1693 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1694 '''
1694 '''
1695 pathlen = len(path)
1695 pathlen = len(path)
1696 i = 0
1696 i = 0
1697 if strip == 0:
1697 if strip == 0:
1698 return '', prefix + path.rstrip()
1698 return '', prefix + path.rstrip()
1699 count = strip
1699 count = strip
1700 while count > 0:
1700 while count > 0:
1701 i = path.find('/', i)
1701 i = path.find('/', i)
1702 if i == -1:
1702 if i == -1:
1703 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1703 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1704 (count, strip, path))
1704 (count, strip, path))
1705 i += 1
1705 i += 1
1706 # consume '//' in the path
1706 # consume '//' in the path
1707 while i < pathlen - 1 and path[i:i + 1] == '/':
1707 while i < pathlen - 1 and path[i:i + 1] == '/':
1708 i += 1
1708 i += 1
1709 count -= 1
1709 count -= 1
1710 return path[:i].lstrip(), prefix + path[i:].rstrip()
1710 return path[:i].lstrip(), prefix + path[i:].rstrip()
1711
1711
1712 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1712 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1713 nulla = afile_orig == "/dev/null"
1713 nulla = afile_orig == "/dev/null"
1714 nullb = bfile_orig == "/dev/null"
1714 nullb = bfile_orig == "/dev/null"
1715 create = nulla and hunk.starta == 0 and hunk.lena == 0
1715 create = nulla and hunk.starta == 0 and hunk.lena == 0
1716 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1716 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1717 abase, afile = pathtransform(afile_orig, strip, prefix)
1717 abase, afile = pathtransform(afile_orig, strip, prefix)
1718 gooda = not nulla and backend.exists(afile)
1718 gooda = not nulla and backend.exists(afile)
1719 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1719 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1720 if afile == bfile:
1720 if afile == bfile:
1721 goodb = gooda
1721 goodb = gooda
1722 else:
1722 else:
1723 goodb = not nullb and backend.exists(bfile)
1723 goodb = not nullb and backend.exists(bfile)
1724 missing = not goodb and not gooda and not create
1724 missing = not goodb and not gooda and not create
1725
1725
1726 # some diff programs apparently produce patches where the afile is
1726 # some diff programs apparently produce patches where the afile is
1727 # not /dev/null, but afile starts with bfile
1727 # not /dev/null, but afile starts with bfile
1728 abasedir = afile[:afile.rfind('/') + 1]
1728 abasedir = afile[:afile.rfind('/') + 1]
1729 bbasedir = bfile[:bfile.rfind('/') + 1]
1729 bbasedir = bfile[:bfile.rfind('/') + 1]
1730 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1730 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1731 and hunk.starta == 0 and hunk.lena == 0):
1731 and hunk.starta == 0 and hunk.lena == 0):
1732 create = True
1732 create = True
1733 missing = False
1733 missing = False
1734
1734
1735 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1735 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1736 # diff is between a file and its backup. In this case, the original
1736 # diff is between a file and its backup. In this case, the original
1737 # file should be patched (see original mpatch code).
1737 # file should be patched (see original mpatch code).
1738 isbackup = (abase == bbase and bfile.startswith(afile))
1738 isbackup = (abase == bbase and bfile.startswith(afile))
1739 fname = None
1739 fname = None
1740 if not missing:
1740 if not missing:
1741 if gooda and goodb:
1741 if gooda and goodb:
1742 if isbackup:
1742 if isbackup:
1743 fname = afile
1743 fname = afile
1744 else:
1744 else:
1745 fname = bfile
1745 fname = bfile
1746 elif gooda:
1746 elif gooda:
1747 fname = afile
1747 fname = afile
1748
1748
1749 if not fname:
1749 if not fname:
1750 if not nullb:
1750 if not nullb:
1751 if isbackup:
1751 if isbackup:
1752 fname = afile
1752 fname = afile
1753 else:
1753 else:
1754 fname = bfile
1754 fname = bfile
1755 elif not nulla:
1755 elif not nulla:
1756 fname = afile
1756 fname = afile
1757 else:
1757 else:
1758 raise PatchError(_("undefined source and destination files"))
1758 raise PatchError(_("undefined source and destination files"))
1759
1759
1760 gp = patchmeta(fname)
1760 gp = patchmeta(fname)
1761 if create:
1761 if create:
1762 gp.op = 'ADD'
1762 gp.op = 'ADD'
1763 elif remove:
1763 elif remove:
1764 gp.op = 'DELETE'
1764 gp.op = 'DELETE'
1765 return gp
1765 return gp
1766
1766
1767 def scanpatch(fp):
1767 def scanpatch(fp):
1768 """like patch.iterhunks, but yield different events
1768 """like patch.iterhunks, but yield different events
1769
1769
1770 - ('file', [header_lines + fromfile + tofile])
1770 - ('file', [header_lines + fromfile + tofile])
1771 - ('context', [context_lines])
1771 - ('context', [context_lines])
1772 - ('hunk', [hunk_lines])
1772 - ('hunk', [hunk_lines])
1773 - ('range', (-start,len, +start,len, proc))
1773 - ('range', (-start,len, +start,len, proc))
1774 """
1774 """
1775 lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1775 lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1776 lr = linereader(fp)
1776 lr = linereader(fp)
1777
1777
1778 def scanwhile(first, p):
1778 def scanwhile(first, p):
1779 """scan lr while predicate holds"""
1779 """scan lr while predicate holds"""
1780 lines = [first]
1780 lines = [first]
1781 for line in iter(lr.readline, ''):
1781 for line in iter(lr.readline, ''):
1782 if p(line):
1782 if p(line):
1783 lines.append(line)
1783 lines.append(line)
1784 else:
1784 else:
1785 lr.push(line)
1785 lr.push(line)
1786 break
1786 break
1787 return lines
1787 return lines
1788
1788
1789 for line in iter(lr.readline, ''):
1789 for line in iter(lr.readline, ''):
1790 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1790 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1791 def notheader(line):
1791 def notheader(line):
1792 s = line.split(None, 1)
1792 s = line.split(None, 1)
1793 return not s or s[0] not in ('---', 'diff')
1793 return not s or s[0] not in ('---', 'diff')
1794 header = scanwhile(line, notheader)
1794 header = scanwhile(line, notheader)
1795 fromfile = lr.readline()
1795 fromfile = lr.readline()
1796 if fromfile.startswith('---'):
1796 if fromfile.startswith('---'):
1797 tofile = lr.readline()
1797 tofile = lr.readline()
1798 header += [fromfile, tofile]
1798 header += [fromfile, tofile]
1799 else:
1799 else:
1800 lr.push(fromfile)
1800 lr.push(fromfile)
1801 yield 'file', header
1801 yield 'file', header
1802 elif line[0:1] == ' ':
1802 elif line.startswith(' '):
1803 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1803 cs = (' ', '\\')
1804 elif line[0] in '-+':
1804 yield 'context', scanwhile(line, lambda l: l.startswith(cs))
1805 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1805 elif line.startswith(('-', '+')):
1806 cs = ('-', '+', '\\')
1807 yield 'hunk', scanwhile(line, lambda l: l.startswith(cs))
1806 else:
1808 else:
1807 m = lines_re.match(line)
1809 m = lines_re.match(line)
1808 if m:
1810 if m:
1809 yield 'range', m.groups()
1811 yield 'range', m.groups()
1810 else:
1812 else:
1811 yield 'other', line
1813 yield 'other', line
1812
1814
1813 def scangitpatch(lr, firstline):
1815 def scangitpatch(lr, firstline):
1814 """
1816 """
1815 Git patches can emit:
1817 Git patches can emit:
1816 - rename a to b
1818 - rename a to b
1817 - change b
1819 - change b
1818 - copy a to c
1820 - copy a to c
1819 - change c
1821 - change c
1820
1822
1821 We cannot apply this sequence as-is, the renamed 'a' could not be
1823 We cannot apply this sequence as-is, the renamed 'a' could not be
1822 found for it would have been renamed already. And we cannot copy
1824 found for it would have been renamed already. And we cannot copy
1823 from 'b' instead because 'b' would have been changed already. So
1825 from 'b' instead because 'b' would have been changed already. So
1824 we scan the git patch for copy and rename commands so we can
1826 we scan the git patch for copy and rename commands so we can
1825 perform the copies ahead of time.
1827 perform the copies ahead of time.
1826 """
1828 """
1827 pos = 0
1829 pos = 0
1828 try:
1830 try:
1829 pos = lr.fp.tell()
1831 pos = lr.fp.tell()
1830 fp = lr.fp
1832 fp = lr.fp
1831 except IOError:
1833 except IOError:
1832 fp = stringio(lr.fp.read())
1834 fp = stringio(lr.fp.read())
1833 gitlr = linereader(fp)
1835 gitlr = linereader(fp)
1834 gitlr.push(firstline)
1836 gitlr.push(firstline)
1835 gitpatches = readgitpatch(gitlr)
1837 gitpatches = readgitpatch(gitlr)
1836 fp.seek(pos)
1838 fp.seek(pos)
1837 return gitpatches
1839 return gitpatches
1838
1840
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context stays None until the diff flavor is known: False once a
    # unified-diff header is seen, True for a context diff
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x.startswith('@'))
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush metadata-only entries queued before the one matching
            # this header
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # any git patches left at EOF carried metadata only (no hunks)
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1934
1936
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """
    def deltahead(binchunk):
        # number of bytes in the base-128 varint header at the front
        count = 0
        for ch in binchunk:
            count += 1
            if not (ord(ch) & 0x80):
                return count
        return count
    # strip the two varint headers (source and result lengths)
    binchunk = binchunk[deltahead(binchunk):]
    binchunk = binchunk[deltahead(binchunk):]
    out = ""
    pos = 0
    end = len(binchunk)
    while pos < end:
        opcode = ord(binchunk[pos])
        pos += 1
        if opcode & 0x80:
            # copy opcode: low bits select which offset/size bytes follow,
            # little-endian
            offset = 0
            size = 0
            for mask, shift in ((0x01, 0), (0x02, 8), (0x04, 16), (0x08, 24)):
                if opcode & mask:
                    offset |= ord(binchunk[pos]) << shift
                    pos += 1
            for mask, shift in ((0x10, 0), (0x20, 8), (0x40, 16)):
                if opcode & mask:
                    size |= ord(binchunk[pos]) << shift
                    pos += 1
            if size == 0:
                # a zero size encodes the maximum copy length
                size = 0x10000
            out += data[offset:offset + size]
        elif opcode != 0:
            # literal insertion: the next 'opcode' bytes are inlined data
            out += binchunk[pos:pos + opcode]
            pos += opcode
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return out
1990
1992
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Read a patch from fp and attempt to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    With eolmode 'strict', patch content and patched files are handled
    in binary mode; any other mode ignores line endings while patching
    and normalizes them to 'eolmode' afterwards.
    """
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)
2003
2005
def _canonprefix(repo, prefix):
    """Canonicalize prefix against the repo root, adding a trailing '/'.

    A falsy prefix is returned unchanged.
    """
    if not prefix:
        return prefix
    canonical = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
    if canonical != '':
        canonical += '/'
    return canonical
2010
2012
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    # Drive the iterhunks() event stream: open one patchfile per 'file'
    # event, feed it the 'hunk' events, and stash pre-images for git
    # copies/renames on the 'git' event.
    prefix = _canonprefix(backend.repo, prefix)
    def pstrip(p):
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # the file this hunk belongs to was rejected earlier
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change (delete, mode flip, copy/rename
                # without content edits)
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                # reject the whole file; later hunks for it are skipped
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # preserve copy/rename sources before they get modified
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2095
2097
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % procutil.shellquote(cwd))
    cmd = ('%s %s -p%d < %s'
           % (patcher, ' '.join(args), strip, procutil.shellquote(patchname)))
    fp = procutil.popen(cmd, 'rb')
    try:
        # scrape the external patcher's output for touched files,
        # fuzz notices and failures
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                # NOTE(review): if this branch is hit before any
                # 'patching file ' line, pf/printed_file are unbound —
                # relies on GNU patch's output ordering; confirm.
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         procutil.explainexit(code))
    return fuzz
2138
2140
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply patchobj through the given backend.

    patchobj may be a file path or an already-open file object. The
    paths of touched files are added to *files*. Returns True when the
    patch applied with fuzz, raises PatchError when it failed to apply.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol')
    normalized = eolmode.lower()
    if normalized not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = normalized

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        # not a path: assume a file-like object
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2165
2167
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """Apply <patchobj> to the working directory with the builtin patcher.

    Returns whether the patch was applied with fuzz factor.
    """
    backend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2172
2174
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> through a repobackend built on (repo, ctx, store)."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, prefix, files, eolmode)
2177
2179
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    patcher = ui.config('ui', 'patch')
    if patcher:
        # a user-configured external patch command takes precedence
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
2199
2201
def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
    """Return the set of repo-relative paths the patch at patchpath touches.

    For renames both the new path and the old path are included.
    """
    backend = fsbackend(ui, repo.root)
    prefix = _canonprefix(repo, prefix)
    with open(patchpath, 'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1,
                                                   prefix)[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       prefix)
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return changed
2222
2224
class GitDiffRequired(Exception):
    """Signals that a change needs the extended git diff format.

    NOTE(review): raise sites are outside this chunk — confirm intent
    against callers before relying on this description.
    """
    pass
2225
2227
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''Return diffopts with every optional feature group enabled and parsed.'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
                           git=True, whitespace=True, formatchanging=True)

# alias kept so existing callers of the old name keep working
diffopts = diffallopts
2232
2234
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # command-line value wins over config, but only if it was set
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }
    buildopts['worddiff'] = ui.configbool('experimental', 'worddiff')
    buildopts['xdiff'] = ui.configbool('experimental', 'xdiff')

    if git:
        buildopts['git'] = get('git')

        # since this is in the experimental section, we need to call
        # ui.configbool directly
        buildopts['showsimilarity'] = ui.configbool('experimental',
                                                    'extendedheader.similarity')

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # default value
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
        buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        binary = None if opts is None else opts.get('binary')
        buildopts['nobinary'] = (not binary if binary is not None
                                 else get('nobinary', forceplain=False))
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2315
2317
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None,
         hunksfilterfn=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.

    hunksfilterfn, if not None, should be a function taking a filectx and
    hunks generator that may yield filtered hunks.
    '''
    for fctx1, fctx2, hdr, hunks in diffhunks(
            repo, node1=node1, node2=node2,
            match=match, changes=changes, opts=opts,
            losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
    ):
        if hunksfilterfn is not None:
            # If the file has been removed, fctx2 is None; but this should
            # not occur here since we catch removed files early in
            # logcmdutil.getlinerangerevs() for 'hg log -L'.
            assert fctx2 is not None, \
                'fctx2 unexpectly None in diff hunks filtering'
            hunks = hunksfilterfn(fctx2, hunks)
        # flatten the (hunkrange, hunklines) pairs into one text blob
        text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
        if hdr and (text or len(hdr) > 1):
            yield '\n'.join(hdr) + '\n'
        if text:
            yield text
2362
2364
2363 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2365 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2364 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2366 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2365 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2367 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2366 where `header` is a list of diff headers and `hunks` is an iterable of
2368 where `header` is a list of diff headers and `hunks` is an iterable of
2367 (`hunkrange`, `hunklines`) tuples.
2369 (`hunkrange`, `hunklines`) tuples.
2368
2370
2369 See diff() for the meaning of parameters.
2371 See diff() for the meaning of parameters.
2370 """
2372 """
2371
2373
2372 if opts is None:
2374 if opts is None:
2373 opts = mdiff.defaultopts
2375 opts = mdiff.defaultopts
2374
2376
2375 if not node1 and not node2:
2377 if not node1 and not node2:
2376 node1 = repo.dirstate.p1()
2378 node1 = repo.dirstate.p1()
2377
2379
2378 def lrugetfilectx():
2380 def lrugetfilectx():
2379 cache = {}
2381 cache = {}
2380 order = collections.deque()
2382 order = collections.deque()
2381 def getfilectx(f, ctx):
2383 def getfilectx(f, ctx):
2382 fctx = ctx.filectx(f, filelog=cache.get(f))
2384 fctx = ctx.filectx(f, filelog=cache.get(f))
2383 if f not in cache:
2385 if f not in cache:
2384 if len(cache) > 20:
2386 if len(cache) > 20:
2385 del cache[order.popleft()]
2387 del cache[order.popleft()]
2386 cache[f] = fctx.filelog()
2388 cache[f] = fctx.filelog()
2387 else:
2389 else:
2388 order.remove(f)
2390 order.remove(f)
2389 order.append(f)
2391 order.append(f)
2390 return fctx
2392 return fctx
2391 return getfilectx
2393 return getfilectx
2392 getfilectx = lrugetfilectx()
2394 getfilectx = lrugetfilectx()
2393
2395
2394 ctx1 = repo[node1]
2396 ctx1 = repo[node1]
2395 ctx2 = repo[node2]
2397 ctx2 = repo[node2]
2396
2398
2397 relfiltered = False
2399 relfiltered = False
2398 if relroot != '' and match.always():
2400 if relroot != '' and match.always():
2399 # as a special case, create a new matcher with just the relroot
2401 # as a special case, create a new matcher with just the relroot
2400 pats = [relroot]
2402 pats = [relroot]
2401 match = scmutil.match(ctx2, pats, default='path')
2403 match = scmutil.match(ctx2, pats, default='path')
2402 relfiltered = True
2404 relfiltered = True
2403
2405
2404 if not changes:
2406 if not changes:
2405 changes = repo.status(ctx1, ctx2, match=match)
2407 changes = repo.status(ctx1, ctx2, match=match)
2406 modified, added, removed = changes[:3]
2408 modified, added, removed = changes[:3]
2407
2409
2408 if not modified and not added and not removed:
2410 if not modified and not added and not removed:
2409 return []
2411 return []
2410
2412
2411 if repo.ui.debugflag:
2413 if repo.ui.debugflag:
2412 hexfunc = hex
2414 hexfunc = hex
2413 else:
2415 else:
2414 hexfunc = short
2416 hexfunc = short
2415 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2417 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2416
2418
2417 if copy is None:
2419 if copy is None:
2418 copy = {}
2420 copy = {}
2419 if opts.git or opts.upgrade:
2421 if opts.git or opts.upgrade:
2420 copy = copies.pathcopies(ctx1, ctx2, match=match)
2422 copy = copies.pathcopies(ctx1, ctx2, match=match)
2421
2423
2422 if relroot is not None:
2424 if relroot is not None:
2423 if not relfiltered:
2425 if not relfiltered:
2424 # XXX this would ideally be done in the matcher, but that is
2426 # XXX this would ideally be done in the matcher, but that is
2425 # generally meant to 'or' patterns, not 'and' them. In this case we
2427 # generally meant to 'or' patterns, not 'and' them. In this case we
2426 # need to 'and' all the patterns from the matcher with relroot.
2428 # need to 'and' all the patterns from the matcher with relroot.
2427 def filterrel(l):
2429 def filterrel(l):
2428 return [f for f in l if f.startswith(relroot)]
2430 return [f for f in l if f.startswith(relroot)]
2429 modified = filterrel(modified)
2431 modified = filterrel(modified)
2430 added = filterrel(added)
2432 added = filterrel(added)
2431 removed = filterrel(removed)
2433 removed = filterrel(removed)
2432 relfiltered = True
2434 relfiltered = True
2433 # filter out copies where either side isn't inside the relative root
2435 # filter out copies where either side isn't inside the relative root
2434 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2436 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2435 if dst.startswith(relroot)
2437 if dst.startswith(relroot)
2436 and src.startswith(relroot)))
2438 and src.startswith(relroot)))
2437
2439
2438 modifiedset = set(modified)
2440 modifiedset = set(modified)
2439 addedset = set(added)
2441 addedset = set(added)
2440 removedset = set(removed)
2442 removedset = set(removed)
2441 for f in modified:
2443 for f in modified:
2442 if f not in ctx1:
2444 if f not in ctx1:
2443 # Fix up added, since merged-in additions appear as
2445 # Fix up added, since merged-in additions appear as
2444 # modifications during merges
2446 # modifications during merges
2445 modifiedset.remove(f)
2447 modifiedset.remove(f)
2446 addedset.add(f)
2448 addedset.add(f)
2447 for f in removed:
2449 for f in removed:
2448 if f not in ctx1:
2450 if f not in ctx1:
2449 # Merged-in additions that are then removed are reported as removed.
2451 # Merged-in additions that are then removed are reported as removed.
2450 # They are not in ctx1, so We don't want to show them in the diff.
2452 # They are not in ctx1, so We don't want to show them in the diff.
2451 removedset.remove(f)
2453 removedset.remove(f)
2452 modified = sorted(modifiedset)
2454 modified = sorted(modifiedset)
2453 added = sorted(addedset)
2455 added = sorted(addedset)
2454 removed = sorted(removedset)
2456 removed = sorted(removedset)
2455 for dst, src in list(copy.items()):
2457 for dst, src in list(copy.items()):
2456 if src not in ctx1:
2458 if src not in ctx1:
2457 # Files merged in during a merge and then copied/renamed are
2459 # Files merged in during a merge and then copied/renamed are
2458 # reported as copies. We want to show them in the diff as additions.
2460 # reported as copies. We want to show them in the diff as additions.
2459 del copy[dst]
2461 del copy[dst]
2460
2462
2461 def difffn(opts, losedata):
2463 def difffn(opts, losedata):
2462 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2464 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2463 copy, getfilectx, opts, losedata, prefix, relroot)
2465 copy, getfilectx, opts, losedata, prefix, relroot)
2464 if opts.upgrade and not opts.git:
2466 if opts.upgrade and not opts.git:
2465 try:
2467 try:
2466 def losedata(fn):
2468 def losedata(fn):
2467 if not losedatafn or not losedatafn(fn=fn):
2469 if not losedatafn or not losedatafn(fn=fn):
2468 raise GitDiffRequired
2470 raise GitDiffRequired
2469 # Buffer the whole output until we are sure it can be generated
2471 # Buffer the whole output until we are sure it can be generated
2470 return list(difffn(opts.copy(git=False), losedata))
2472 return list(difffn(opts.copy(git=False), losedata))
2471 except GitDiffRequired:
2473 except GitDiffRequired:
2472 return difffn(opts.copy(git=True), None)
2474 return difffn(opts.copy(git=True), None)
2473 else:
2475 else:
2474 return difffn(opts, None)
2476 return difffn(opts, None)
2475
2477
2476 def difflabel(func, *args, **kw):
2478 def difflabel(func, *args, **kw):
2477 '''yields 2-tuples of (output, label) based on the output of func()'''
2479 '''yields 2-tuples of (output, label) based on the output of func()'''
2478 inlinecolor = False
2480 inlinecolor = False
2479 if kw.get(r'opts'):
2481 if kw.get(r'opts'):
2480 inlinecolor = kw[r'opts'].worddiff
2482 inlinecolor = kw[r'opts'].worddiff
2481 headprefixes = [('diff', 'diff.diffline'),
2483 headprefixes = [('diff', 'diff.diffline'),
2482 ('copy', 'diff.extended'),
2484 ('copy', 'diff.extended'),
2483 ('rename', 'diff.extended'),
2485 ('rename', 'diff.extended'),
2484 ('old', 'diff.extended'),
2486 ('old', 'diff.extended'),
2485 ('new', 'diff.extended'),
2487 ('new', 'diff.extended'),
2486 ('deleted', 'diff.extended'),
2488 ('deleted', 'diff.extended'),
2487 ('index', 'diff.extended'),
2489 ('index', 'diff.extended'),
2488 ('similarity', 'diff.extended'),
2490 ('similarity', 'diff.extended'),
2489 ('---', 'diff.file_a'),
2491 ('---', 'diff.file_a'),
2490 ('+++', 'diff.file_b')]
2492 ('+++', 'diff.file_b')]
2491 textprefixes = [('@', 'diff.hunk'),
2493 textprefixes = [('@', 'diff.hunk'),
2492 ('-', 'diff.deleted'),
2494 ('-', 'diff.deleted'),
2493 ('+', 'diff.inserted')]
2495 ('+', 'diff.inserted')]
2494 head = False
2496 head = False
2495 for chunk in func(*args, **kw):
2497 for chunk in func(*args, **kw):
2496 lines = chunk.split('\n')
2498 lines = chunk.split('\n')
2497 matches = {}
2499 matches = {}
2498 if inlinecolor:
2500 if inlinecolor:
2499 matches = _findmatches(lines)
2501 matches = _findmatches(lines)
2500 for i, line in enumerate(lines):
2502 for i, line in enumerate(lines):
2501 if i != 0:
2503 if i != 0:
2502 yield ('\n', '')
2504 yield ('\n', '')
2503 if head:
2505 if head:
2504 if line.startswith('@'):
2506 if line.startswith('@'):
2505 head = False
2507 head = False
2506 else:
2508 else:
2507 if line and line[0] not in ' +-@\\':
2509 if line and not line.startswith((' ', '+', '-', '@', '\\')):
2508 head = True
2510 head = True
2509 stripline = line
2511 stripline = line
2510 diffline = False
2512 diffline = False
2511 if not head and line and line[0] in '+-':
2513 if not head and line and line.startswith(('+', '-')):
2512 # highlight tabs and trailing whitespace, but only in
2514 # highlight tabs and trailing whitespace, but only in
2513 # changed lines
2515 # changed lines
2514 stripline = line.rstrip()
2516 stripline = line.rstrip()
2515 diffline = True
2517 diffline = True
2516
2518
2517 prefixes = textprefixes
2519 prefixes = textprefixes
2518 if head:
2520 if head:
2519 prefixes = headprefixes
2521 prefixes = headprefixes
2520 for prefix, label in prefixes:
2522 for prefix, label in prefixes:
2521 if stripline.startswith(prefix):
2523 if stripline.startswith(prefix):
2522 if diffline:
2524 if diffline:
2523 if i in matches:
2525 if i in matches:
2524 for t, l in _inlinediff(lines[i].rstrip(),
2526 for t, l in _inlinediff(lines[i].rstrip(),
2525 lines[matches[i]].rstrip(),
2527 lines[matches[i]].rstrip(),
2526 label):
2528 label):
2527 yield (t, l)
2529 yield (t, l)
2528 else:
2530 else:
2529 for token in tabsplitter.findall(stripline):
2531 for token in tabsplitter.findall(stripline):
2530 if token.startswith('\t'):
2532 if token.startswith('\t'):
2531 yield (token, 'diff.tab')
2533 yield (token, 'diff.tab')
2532 else:
2534 else:
2533 yield (token, label)
2535 yield (token, label)
2534 else:
2536 else:
2535 yield (stripline, label)
2537 yield (stripline, label)
2536 break
2538 break
2537 else:
2539 else:
2538 yield (line, '')
2540 yield (line, '')
2539 if line != stripline:
2541 if line != stripline:
2540 yield (line[len(stripline):], 'diff.trailingwhitespace')
2542 yield (line[len(stripline):], 'diff.trailingwhitespace')
2541
2543
2542 def _findmatches(slist):
2544 def _findmatches(slist):
2543 '''Look for insertion matches to deletion and returns a dict of
2545 '''Look for insertion matches to deletion and returns a dict of
2544 correspondences.
2546 correspondences.
2545 '''
2547 '''
2546 lastmatch = 0
2548 lastmatch = 0
2547 matches = {}
2549 matches = {}
2548 for i, line in enumerate(slist):
2550 for i, line in enumerate(slist):
2549 if line == '':
2551 if line == '':
2550 continue
2552 continue
2551 if line[0] == '-':
2553 if line.startswith('-'):
2552 lastmatch = max(lastmatch, i)
2554 lastmatch = max(lastmatch, i)
2553 newgroup = False
2555 newgroup = False
2554 for j, newline in enumerate(slist[lastmatch + 1:]):
2556 for j, newline in enumerate(slist[lastmatch + 1:]):
2555 if newline == '':
2557 if newline == '':
2556 continue
2558 continue
2557 if newline[0] == '-' and newgroup: # too far, no match
2559 if newline.startswith('-') and newgroup: # too far, no match
2558 break
2560 break
2559 if newline[0] == '+': # potential match
2561 if newline.startswith('+'): # potential match
2560 newgroup = True
2562 newgroup = True
2561 sim = difflib.SequenceMatcher(None, line, newline).ratio()
2563 sim = difflib.SequenceMatcher(None, line, newline).ratio()
2562 if sim > 0.7:
2564 if sim > 0.7:
2563 lastmatch = lastmatch + 1 + j
2565 lastmatch = lastmatch + 1 + j
2564 matches[i] = lastmatch
2566 matches[i] = lastmatch
2565 matches[lastmatch] = i
2567 matches[lastmatch] = i
2566 break
2568 break
2567 return matches
2569 return matches
2568
2570
2569 def _inlinediff(s1, s2, operation):
2571 def _inlinediff(s1, s2, operation):
2570 '''Perform string diff to highlight specific changes.'''
2572 '''Perform string diff to highlight specific changes.'''
2571 operation_skip = '+?' if operation == 'diff.deleted' else '-?'
2573 operation_skip = ('+', '?') if operation == 'diff.deleted' else ('-', '?')
2572 if operation == 'diff.deleted':
2574 if operation == 'diff.deleted':
2573 s2, s1 = s1, s2
2575 s2, s1 = s1, s2
2574
2576
2575 buff = []
2577 buff = []
2576 # we never want to higlight the leading +-
2578 # we never want to higlight the leading +-
2577 if operation == 'diff.deleted' and s2.startswith('-'):
2579 if operation == 'diff.deleted' and s2.startswith('-'):
2578 label = operation
2580 label = operation
2579 token = '-'
2581 token = '-'
2580 s2 = s2[1:]
2582 s2 = s2[1:]
2581 s1 = s1[1:]
2583 s1 = s1[1:]
2582 elif operation == 'diff.inserted' and s1.startswith('+'):
2584 elif operation == 'diff.inserted' and s1.startswith('+'):
2583 label = operation
2585 label = operation
2584 token = '+'
2586 token = '+'
2585 s2 = s2[1:]
2587 s2 = s2[1:]
2586 s1 = s1[1:]
2588 s1 = s1[1:]
2587 else:
2589 else:
2588 raise error.ProgrammingError("Case not expected, operation = %s" %
2590 raise error.ProgrammingError("Case not expected, operation = %s" %
2589 operation)
2591 operation)
2590
2592
2591 s = difflib.ndiff(_nonwordre.split(s2), _nonwordre.split(s1))
2593 s = difflib.ndiff(_nonwordre.split(s2), _nonwordre.split(s1))
2592 for part in s:
2594 for part in s:
2593 if part[0] in operation_skip or len(part) == 2:
2595 if part.startswith(operation_skip) or len(part) == 2:
2594 continue
2596 continue
2595 l = operation + '.highlight'
2597 l = operation + '.highlight'
2596 if part[0] in ' ':
2598 if part.startswith(' '):
2597 l = operation
2599 l = operation
2598 if part[2:] == '\t':
2600 if part[2:] == '\t':
2599 l = 'diff.tab'
2601 l = 'diff.tab'
2600 if l == label: # contiguous token with same label
2602 if l == label: # contiguous token with same label
2601 token += part[2:]
2603 token += part[2:]
2602 continue
2604 continue
2603 else:
2605 else:
2604 buff.append((token, label))
2606 buff.append((token, label))
2605 label = l
2607 label = l
2606 token = part[2:]
2608 token = part[2:]
2607 buff.append((token, label))
2609 buff.append((token, label))
2608
2610
2609 return buff
2611 return buff
2610
2612
2611 def diffui(*args, **kw):
2613 def diffui(*args, **kw):
2612 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2614 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2613 return difflabel(diff, *args, **kw)
2615 return difflabel(diff, *args, **kw)
2614
2616
2615 def _filepairs(modified, added, removed, copy, opts):
2617 def _filepairs(modified, added, removed, copy, opts):
2616 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2618 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2617 before and f2 is the the name after. For added files, f1 will be None,
2619 before and f2 is the the name after. For added files, f1 will be None,
2618 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2620 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2619 or 'rename' (the latter two only if opts.git is set).'''
2621 or 'rename' (the latter two only if opts.git is set).'''
2620 gone = set()
2622 gone = set()
2621
2623
2622 copyto = dict([(v, k) for k, v in copy.items()])
2624 copyto = dict([(v, k) for k, v in copy.items()])
2623
2625
2624 addedset, removedset = set(added), set(removed)
2626 addedset, removedset = set(added), set(removed)
2625
2627
2626 for f in sorted(modified + added + removed):
2628 for f in sorted(modified + added + removed):
2627 copyop = None
2629 copyop = None
2628 f1, f2 = f, f
2630 f1, f2 = f, f
2629 if f in addedset:
2631 if f in addedset:
2630 f1 = None
2632 f1 = None
2631 if f in copy:
2633 if f in copy:
2632 if opts.git:
2634 if opts.git:
2633 f1 = copy[f]
2635 f1 = copy[f]
2634 if f1 in removedset and f1 not in gone:
2636 if f1 in removedset and f1 not in gone:
2635 copyop = 'rename'
2637 copyop = 'rename'
2636 gone.add(f1)
2638 gone.add(f1)
2637 else:
2639 else:
2638 copyop = 'copy'
2640 copyop = 'copy'
2639 elif f in removedset:
2641 elif f in removedset:
2640 f2 = None
2642 f2 = None
2641 if opts.git:
2643 if opts.git:
2642 # have we already reported a copy above?
2644 # have we already reported a copy above?
2643 if (f in copyto and copyto[f] in addedset
2645 if (f in copyto and copyto[f] in addedset
2644 and copy[copyto[f]] == f):
2646 and copy[copyto[f]] == f):
2645 continue
2647 continue
2646 yield f1, f2, copyop
2648 yield f1, f2, copyop
2647
2649
2648 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2650 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2649 copy, getfilectx, opts, losedatafn, prefix, relroot):
2651 copy, getfilectx, opts, losedatafn, prefix, relroot):
2650 '''given input data, generate a diff and yield it in blocks
2652 '''given input data, generate a diff and yield it in blocks
2651
2653
2652 If generating a diff would lose data like flags or binary data and
2654 If generating a diff would lose data like flags or binary data and
2653 losedatafn is not None, it will be called.
2655 losedatafn is not None, it will be called.
2654
2656
2655 relroot is removed and prefix is added to every path in the diff output.
2657 relroot is removed and prefix is added to every path in the diff output.
2656
2658
2657 If relroot is not empty, this function expects every path in modified,
2659 If relroot is not empty, this function expects every path in modified,
2658 added, removed and copy to start with it.'''
2660 added, removed and copy to start with it.'''
2659
2661
2660 def gitindex(text):
2662 def gitindex(text):
2661 if not text:
2663 if not text:
2662 text = ""
2664 text = ""
2663 l = len(text)
2665 l = len(text)
2664 s = hashlib.sha1('blob %d\0' % l)
2666 s = hashlib.sha1('blob %d\0' % l)
2665 s.update(text)
2667 s.update(text)
2666 return hex(s.digest())
2668 return hex(s.digest())
2667
2669
2668 if opts.noprefix:
2670 if opts.noprefix:
2669 aprefix = bprefix = ''
2671 aprefix = bprefix = ''
2670 else:
2672 else:
2671 aprefix = 'a/'
2673 aprefix = 'a/'
2672 bprefix = 'b/'
2674 bprefix = 'b/'
2673
2675
2674 def diffline(f, revs):
2676 def diffline(f, revs):
2675 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2677 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2676 return 'diff %s %s' % (revinfo, f)
2678 return 'diff %s %s' % (revinfo, f)
2677
2679
2678 def isempty(fctx):
2680 def isempty(fctx):
2679 return fctx is None or fctx.size() == 0
2681 return fctx is None or fctx.size() == 0
2680
2682
2681 date1 = dateutil.datestr(ctx1.date())
2683 date1 = dateutil.datestr(ctx1.date())
2682 date2 = dateutil.datestr(ctx2.date())
2684 date2 = dateutil.datestr(ctx2.date())
2683
2685
2684 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2686 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2685
2687
2686 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2688 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2687 or repo.ui.configbool('devel', 'check-relroot')):
2689 or repo.ui.configbool('devel', 'check-relroot')):
2688 for f in modified + added + removed + list(copy) + list(copy.values()):
2690 for f in modified + added + removed + list(copy) + list(copy.values()):
2689 if f is not None and not f.startswith(relroot):
2691 if f is not None and not f.startswith(relroot):
2690 raise AssertionError(
2692 raise AssertionError(
2691 "file %s doesn't start with relroot %s" % (f, relroot))
2693 "file %s doesn't start with relroot %s" % (f, relroot))
2692
2694
2693 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2695 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2694 content1 = None
2696 content1 = None
2695 content2 = None
2697 content2 = None
2696 fctx1 = None
2698 fctx1 = None
2697 fctx2 = None
2699 fctx2 = None
2698 flag1 = None
2700 flag1 = None
2699 flag2 = None
2701 flag2 = None
2700 if f1:
2702 if f1:
2701 fctx1 = getfilectx(f1, ctx1)
2703 fctx1 = getfilectx(f1, ctx1)
2702 if opts.git or losedatafn:
2704 if opts.git or losedatafn:
2703 flag1 = ctx1.flags(f1)
2705 flag1 = ctx1.flags(f1)
2704 if f2:
2706 if f2:
2705 fctx2 = getfilectx(f2, ctx2)
2707 fctx2 = getfilectx(f2, ctx2)
2706 if opts.git or losedatafn:
2708 if opts.git or losedatafn:
2707 flag2 = ctx2.flags(f2)
2709 flag2 = ctx2.flags(f2)
2708 # if binary is True, output "summary" or "base85", but not "text diff"
2710 # if binary is True, output "summary" or "base85", but not "text diff"
2709 if opts.text:
2711 if opts.text:
2710 binary = False
2712 binary = False
2711 else:
2713 else:
2712 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2714 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2713
2715
2714 if losedatafn and not opts.git:
2716 if losedatafn and not opts.git:
2715 if (binary or
2717 if (binary or
2716 # copy/rename
2718 # copy/rename
2717 f2 in copy or
2719 f2 in copy or
2718 # empty file creation
2720 # empty file creation
2719 (not f1 and isempty(fctx2)) or
2721 (not f1 and isempty(fctx2)) or
2720 # empty file deletion
2722 # empty file deletion
2721 (isempty(fctx1) and not f2) or
2723 (isempty(fctx1) and not f2) or
2722 # create with flags
2724 # create with flags
2723 (not f1 and flag2) or
2725 (not f1 and flag2) or
2724 # change flags
2726 # change flags
2725 (f1 and f2 and flag1 != flag2)):
2727 (f1 and f2 and flag1 != flag2)):
2726 losedatafn(f2 or f1)
2728 losedatafn(f2 or f1)
2727
2729
2728 path1 = f1 or f2
2730 path1 = f1 or f2
2729 path2 = f2 or f1
2731 path2 = f2 or f1
2730 path1 = posixpath.join(prefix, path1[len(relroot):])
2732 path1 = posixpath.join(prefix, path1[len(relroot):])
2731 path2 = posixpath.join(prefix, path2[len(relroot):])
2733 path2 = posixpath.join(prefix, path2[len(relroot):])
2732 header = []
2734 header = []
2733 if opts.git:
2735 if opts.git:
2734 header.append('diff --git %s%s %s%s' %
2736 header.append('diff --git %s%s %s%s' %
2735 (aprefix, path1, bprefix, path2))
2737 (aprefix, path1, bprefix, path2))
2736 if not f1: # added
2738 if not f1: # added
2737 header.append('new file mode %s' % gitmode[flag2])
2739 header.append('new file mode %s' % gitmode[flag2])
2738 elif not f2: # removed
2740 elif not f2: # removed
2739 header.append('deleted file mode %s' % gitmode[flag1])
2741 header.append('deleted file mode %s' % gitmode[flag1])
2740 else: # modified/copied/renamed
2742 else: # modified/copied/renamed
2741 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2743 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2742 if mode1 != mode2:
2744 if mode1 != mode2:
2743 header.append('old mode %s' % mode1)
2745 header.append('old mode %s' % mode1)
2744 header.append('new mode %s' % mode2)
2746 header.append('new mode %s' % mode2)
2745 if copyop is not None:
2747 if copyop is not None:
2746 if opts.showsimilarity:
2748 if opts.showsimilarity:
2747 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2749 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2748 header.append('similarity index %d%%' % sim)
2750 header.append('similarity index %d%%' % sim)
2749 header.append('%s from %s' % (copyop, path1))
2751 header.append('%s from %s' % (copyop, path1))
2750 header.append('%s to %s' % (copyop, path2))
2752 header.append('%s to %s' % (copyop, path2))
2751 elif revs and not repo.ui.quiet:
2753 elif revs and not repo.ui.quiet:
2752 header.append(diffline(path1, revs))
2754 header.append(diffline(path1, revs))
2753
2755
2754 # fctx.is | diffopts | what to | is fctx.data()
2756 # fctx.is | diffopts | what to | is fctx.data()
2755 # binary() | text nobinary git index | output? | outputted?
2757 # binary() | text nobinary git index | output? | outputted?
2756 # ------------------------------------|----------------------------
2758 # ------------------------------------|----------------------------
2757 # yes | no no no * | summary | no
2759 # yes | no no no * | summary | no
2758 # yes | no no yes * | base85 | yes
2760 # yes | no no yes * | base85 | yes
2759 # yes | no yes no * | summary | no
2761 # yes | no yes no * | summary | no
2760 # yes | no yes yes 0 | summary | no
2762 # yes | no yes yes 0 | summary | no
2761 # yes | no yes yes >0 | summary | semi [1]
2763 # yes | no yes yes >0 | summary | semi [1]
2762 # yes | yes * * * | text diff | yes
2764 # yes | yes * * * | text diff | yes
2763 # no | * * * * | text diff | yes
2765 # no | * * * * | text diff | yes
2764 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
2766 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
2765 if binary and (not opts.git or (opts.git and opts.nobinary and not
2767 if binary and (not opts.git or (opts.git and opts.nobinary and not
2766 opts.index)):
2768 opts.index)):
2767 # fast path: no binary content will be displayed, content1 and
2769 # fast path: no binary content will be displayed, content1 and
2768 # content2 are only used for equivalent test. cmp() could have a
2770 # content2 are only used for equivalent test. cmp() could have a
2769 # fast path.
2771 # fast path.
2770 if fctx1 is not None:
2772 if fctx1 is not None:
2771 content1 = b'\0'
2773 content1 = b'\0'
2772 if fctx2 is not None:
2774 if fctx2 is not None:
2773 if fctx1 is not None and not fctx1.cmp(fctx2):
2775 if fctx1 is not None and not fctx1.cmp(fctx2):
2774 content2 = b'\0' # not different
2776 content2 = b'\0' # not different
2775 else:
2777 else:
2776 content2 = b'\0\0'
2778 content2 = b'\0\0'
2777 else:
2779 else:
2778 # normal path: load contents
2780 # normal path: load contents
2779 if fctx1 is not None:
2781 if fctx1 is not None:
2780 content1 = fctx1.data()
2782 content1 = fctx1.data()
2781 if fctx2 is not None:
2783 if fctx2 is not None:
2782 content2 = fctx2.data()
2784 content2 = fctx2.data()
2783
2785
2784 if binary and opts.git and not opts.nobinary:
2786 if binary and opts.git and not opts.nobinary:
2785 text = mdiff.b85diff(content1, content2)
2787 text = mdiff.b85diff(content1, content2)
2786 if text:
2788 if text:
2787 header.append('index %s..%s' %
2789 header.append('index %s..%s' %
2788 (gitindex(content1), gitindex(content2)))
2790 (gitindex(content1), gitindex(content2)))
2789 hunks = (None, [text]),
2791 hunks = (None, [text]),
2790 else:
2792 else:
2791 if opts.git and opts.index > 0:
2793 if opts.git and opts.index > 0:
2792 flag = flag1
2794 flag = flag1
2793 if flag is None:
2795 if flag is None:
2794 flag = flag2
2796 flag = flag2
2795 header.append('index %s..%s %s' %
2797 header.append('index %s..%s %s' %
2796 (gitindex(content1)[0:opts.index],
2798 (gitindex(content1)[0:opts.index],
2797 gitindex(content2)[0:opts.index],
2799 gitindex(content2)[0:opts.index],
2798 gitmode[flag]))
2800 gitmode[flag]))
2799
2801
2800 uheaders, hunks = mdiff.unidiff(content1, date1,
2802 uheaders, hunks = mdiff.unidiff(content1, date1,
2801 content2, date2,
2803 content2, date2,
2802 path1, path2,
2804 path1, path2,
2803 binary=binary, opts=opts)
2805 binary=binary, opts=opts)
2804 header.extend(uheaders)
2806 header.extend(uheaders)
2805 yield fctx1, fctx2, header, hunks
2807 yield fctx1, fctx2, header, hunks
2806
2808
2807 def diffstatsum(stats):
2809 def diffstatsum(stats):
2808 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2810 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2809 for f, a, r, b in stats:
2811 for f, a, r, b in stats:
2810 maxfile = max(maxfile, encoding.colwidth(f))
2812 maxfile = max(maxfile, encoding.colwidth(f))
2811 maxtotal = max(maxtotal, a + r)
2813 maxtotal = max(maxtotal, a + r)
2812 addtotal += a
2814 addtotal += a
2813 removetotal += r
2815 removetotal += r
2814 binary = binary or b
2816 binary = binary or b
2815
2817
2816 return maxfile, maxtotal, addtotal, removetotal, binary
2818 return maxfile, maxtotal, addtotal, removetotal, binary
2817
2819
def diffstatdata(lines):
    '''Parse a unified diff into per-file statistics.

    *lines* is an iterable of diff text lines.  Returns a list of
    (filename, adds, removes, isbinary) tuples, one per file touched
    by the diff.
    '''
    # Raw string: '\s' in a plain literal is an invalid escape sequence
    # (DeprecationWarning on Python 3.6+, a SyntaxError in later versions).
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        # flush stats of the file being processed (no-op before the
        # first 'diff' header has been seen)
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader is used to track if a line is in the
    # header portion of the diff. This helps properly account
    # for lines that start with '--' or '++'
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # starting a new file diff
            # set numbers to 0 and reset inheader
            inheader = True
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('@@'):
            # first hunk marker: header is over, start counting
            inheader = False
        elif line.startswith('+') and not inheader:
            adds += 1
        elif line.startswith('-') and not inheader:
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results
2856
2858
def diffstat(lines, width=80):
    '''Return a histogram-style change summary for the diff in *lines*.

    *width* is the target total output width in columns.  The result is
    a single string with one row per file plus a trailing totals line.
    '''
    filestats = diffstatdata(lines)
    (maxname, maxtotal, totaladds,
     totalremoves, hasbinary) = diffstatsum(filestats)

    countwidth = len(str(maxtotal))
    # 'Bin' occupies three columns in the count field
    if hasbinary:
        countwidth = max(countwidth, 3)
    # six columns go to fixed separators; never squeeze the graph
    # below ten characters
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(value):
        if maxtotal <= graphwidth:
            return value
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(value * graphwidth // maxtotal, int(bool(value)))

    output = []
    for name, added, removed, binary in filestats:
        count = 'Bin' if binary else '%d' % (added + removed)
        padding = ' ' * (maxname - encoding.colwidth(name))
        output.append(' %s%s | %*s %s%s\n' %
                      (name, padding, countwidth, count,
                       '+' * scale(added), '-' * scale(removed)))

    if filestats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(filestats), totaladds, totalremoves))

    return ''.join(output)
2894
2896
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''
    for line in diffstat(*args, **kw).splitlines():
        # histogram rows end with '+' or '-'; colorize their graph part
        if line and line[-1] in '+-':
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            # emit the '+' run, then the '-' run, each with its label
            for pattern, label in ((br'\++', 'diffstat.inserted'),
                                   (br'-+', 'diffstat.deleted')):
                m = re.search(pattern, graph)
                if m:
                    yield (m.group(0), label)
        else:
            yield (line, '')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now