##// END OF EJS Templates
py3: fix test-diff-color.t...
Mark Thomas -
r40318:e4f82db0 default
parent child Browse files
Show More
@@ -1,631 +1,632
1 1 test-abort-checkin.t
2 2 test-absorb-filefixupstate.py
3 3 test-absorb-phase.t
4 4 test-absorb-rename.t
5 5 test-absorb-strip.t
6 6 test-absorb.t
7 7 test-add.t
8 8 test-addremove-similar.t
9 9 test-addremove.t
10 10 test-alias.t
11 11 test-amend-subrepo.t
12 12 test-amend.t
13 13 test-ancestor.py
14 14 test-annotate.py
15 15 test-annotate.t
16 16 test-archive-symlinks.t
17 17 test-archive.t
18 18 test-atomictempfile.py
19 19 test-audit-path.t
20 20 test-audit-subrepo.t
21 21 test-automv.t
22 22 test-backout.t
23 23 test-backwards-remove.t
24 24 test-bad-extension.t
25 25 test-bad-pull.t
26 26 test-basic.t
27 27 test-bdiff.py
28 28 test-bheads.t
29 29 test-bisect.t
30 30 test-bisect2.t
31 31 test-bisect3.t
32 32 test-blackbox.t
33 33 test-bookmarks-current.t
34 34 test-bookmarks-merge.t
35 35 test-bookmarks-pushpull.t
36 36 test-bookmarks-rebase.t
37 37 test-bookmarks-strip.t
38 38 test-bookmarks.t
39 39 test-branch-change.t
40 40 test-branch-option.t
41 41 test-branch-tag-confict.t
42 42 test-branches.t
43 43 test-bundle-phases.t
44 44 test-bundle-r.t
45 45 test-bundle-type.t
46 46 test-bundle-vs-outgoing.t
47 47 test-bundle.t
48 48 test-bundle2-exchange.t
49 49 test-bundle2-format.t
50 50 test-bundle2-multiple-changegroups.t
51 51 test-bundle2-pushback.t
52 52 test-bundle2-remote-changegroup.t
53 53 test-cache-abuse.t
54 54 test-cappedreader.py
55 55 test-casecollision.t
56 56 test-cat.t
57 57 test-cbor.py
58 58 test-censor.t
59 59 test-changelog-exec.t
60 60 test-check-code.t
61 61 test-check-commit.t
62 62 test-check-config.py
63 63 test-check-config.t
64 64 test-check-execute.t
65 65 test-check-interfaces.py
66 66 test-check-module-imports.t
67 67 test-check-py3-compat.t
68 68 test-check-pyflakes.t
69 69 test-check-pylint.t
70 70 test-check-shbang.t
71 71 test-children.t
72 72 test-churn.t
73 73 test-clone-cgi.t
74 74 test-clone-pull-corruption.t
75 75 test-clone-r.t
76 76 test-clone-uncompressed.t
77 77 test-clone-update-order.t
78 78 test-clone.t
79 79 test-clonebundles.t
80 80 test-close-head.t
81 81 test-commit-amend.t
82 82 test-commit-interactive.t
83 83 test-commit-multiple.t
84 84 test-commit-unresolved.t
85 85 test-commit.t
86 86 test-committer.t
87 87 test-completion.t
88 88 test-config-env.py
89 89 test-config.t
90 90 test-conflict.t
91 91 test-confused-revert.t
92 92 test-context.py
93 93 test-contrib-check-code.t
94 94 test-contrib-check-commit.t
95 95 test-contrib-dumprevlog.t
96 96 test-contrib-perf.t
97 97 test-contrib-relnotes.t
98 98 test-contrib-testparseutil.t
99 99 test-contrib.t
100 100 test-convert-authormap.t
101 101 test-convert-clonebranches.t
102 102 test-convert-cvs-branch.t
103 103 test-convert-cvs-detectmerge.t
104 104 test-convert-cvs-synthetic.t
105 105 test-convert-cvs.t
106 106 test-convert-cvsnt-mergepoints.t
107 107 test-convert-datesort.t
108 108 test-convert-filemap.t
109 109 test-convert-hg-sink.t
110 110 test-convert-hg-source.t
111 111 test-convert-hg-startrev.t
112 112 test-convert-splicemap.t
113 113 test-convert-tagsbranch-topology.t
114 114 test-copy-move-merge.t
115 115 test-copy.t
116 116 test-copytrace-heuristics.t
117 117 test-debugbuilddag.t
118 118 test-debugbundle.t
119 119 test-debugcommands.t
120 120 test-debugextensions.t
121 121 test-debugindexdot.t
122 122 test-debugrename.t
123 123 test-default-push.t
124 124 test-diff-antipatience.t
125 125 test-diff-binary-file.t
126 126 test-diff-change.t
127 test-diff-color.t
127 128 test-diff-copy-depth.t
128 129 test-diff-hashes.t
129 130 test-diff-ignore-whitespace.t
130 131 test-diff-indent-heuristic.t
131 132 test-diff-issue2761.t
132 133 test-diff-newlines.t
133 134 test-diff-reverse.t
134 135 test-diff-subdir.t
135 136 test-diff-unified.t
136 137 test-diff-upgrade.t
137 138 test-diffdir.t
138 139 test-diffstat.t
139 140 test-directaccess.t
140 141 test-dirstate-backup.t
141 142 test-dirstate-nonnormalset.t
142 143 test-dirstate.t
143 144 test-dispatch.py
144 145 test-doctest.py
145 146 test-double-merge.t
146 147 test-drawdag.t
147 148 test-duplicateoptions.py
148 149 test-editor-filename.t
149 150 test-empty-dir.t
150 151 test-empty-file.t
151 152 test-empty-group.t
152 153 test-empty.t
153 154 test-encode.t
154 155 test-encoding-func.py
155 156 test-encoding.t
156 157 test-eol-add.t
157 158 test-eol-clone.t
158 159 test-eol-hook.t
159 160 test-eol-patch.t
160 161 test-eol-tag.t
161 162 test-eol-update.t
162 163 test-eol.t
163 164 test-eolfilename.t
164 165 test-excessive-merge.t
165 166 test-exchange-obsmarkers-case-A1.t
166 167 test-exchange-obsmarkers-case-A2.t
167 168 test-exchange-obsmarkers-case-A3.t
168 169 test-exchange-obsmarkers-case-A4.t
169 170 test-exchange-obsmarkers-case-A5.t
170 171 test-exchange-obsmarkers-case-A6.t
171 172 test-exchange-obsmarkers-case-A7.t
172 173 test-exchange-obsmarkers-case-B1.t
173 174 test-exchange-obsmarkers-case-B2.t
174 175 test-exchange-obsmarkers-case-B3.t
175 176 test-exchange-obsmarkers-case-B4.t
176 177 test-exchange-obsmarkers-case-B5.t
177 178 test-exchange-obsmarkers-case-B6.t
178 179 test-exchange-obsmarkers-case-B7.t
179 180 test-exchange-obsmarkers-case-C1.t
180 181 test-exchange-obsmarkers-case-C2.t
181 182 test-exchange-obsmarkers-case-C3.t
182 183 test-exchange-obsmarkers-case-C4.t
183 184 test-exchange-obsmarkers-case-D1.t
184 185 test-exchange-obsmarkers-case-D2.t
185 186 test-exchange-obsmarkers-case-D3.t
186 187 test-exchange-obsmarkers-case-D4.t
187 188 test-execute-bit.t
188 189 test-export.t
189 190 test-extdata.t
190 191 test-extdiff.t
191 192 test-extensions-afterloaded.t
192 193 test-extensions-wrapfunction.py
193 194 test-extra-filelog-entry.t
194 195 test-fetch.t
195 196 test-filebranch.t
196 197 test-filecache.py
197 198 test-filelog.py
198 199 test-fileset-generated.t
199 200 test-fileset.t
200 201 test-fix-topology.t
201 202 test-fix.t
202 203 test-flags.t
203 204 test-fncache.t
204 205 test-generaldelta.t
205 206 test-getbundle.t
206 207 test-git-export.t
207 208 test-globalopts.t
208 209 test-glog-beautifygraph.t
209 210 test-glog-topological.t
210 211 test-glog.t
211 212 test-gpg.t
212 213 test-graft.t
213 214 test-grep.t
214 215 test-hg-parseurl.py
215 216 test-hghave.t
216 217 test-hgignore.t
217 218 test-hgk.t
218 219 test-hgrc.t
219 220 test-hgweb-annotate-whitespace.t
220 221 test-hgweb-bundle.t
221 222 test-hgweb-csp.t
222 223 test-hgweb-descend-empties.t
223 224 test-hgweb-diffs.t
224 225 test-hgweb-empty.t
225 226 test-hgweb-filelog.t
226 227 test-hgweb-non-interactive.t
227 228 test-hgweb-raw.t
228 229 test-hgweb-removed.t
229 230 test-hgweb.t
230 231 test-hgwebdir-paths.py
231 232 test-hgwebdirsym.t
232 233 test-histedit-arguments.t
233 234 test-histedit-base.t
234 235 test-histedit-bookmark-motion.t
235 236 test-histedit-commute.t
236 237 test-histedit-drop.t
237 238 test-histedit-edit.t
238 239 test-histedit-fold-non-commute.t
239 240 test-histedit-fold.t
240 241 test-histedit-no-backup.t
241 242 test-histedit-no-change.t
242 243 test-histedit-non-commute-abort.t
243 244 test-histedit-non-commute.t
244 245 test-histedit-obsolete.t
245 246 test-histedit-outgoing.t
246 247 test-histedit-templates.t
247 248 test-http-branchmap.t
248 249 test-http-bundle1.t
249 250 test-http-clone-r.t
250 251 test-http-permissions.t
251 252 test-http.t
252 253 test-hybridencode.py
253 254 test-i18n.t
254 255 test-identify.t
255 256 test-impexp-branch.t
256 257 test-import-bypass.t
257 258 test-import-eol.t
258 259 test-import-merge.t
259 260 test-import-unknown.t
260 261 test-import.t
261 262 test-imports-checker.t
262 263 test-incoming-outgoing.t
263 264 test-infinitepush-bundlestore.t
264 265 test-infinitepush-ci.t
265 266 test-infinitepush.t
266 267 test-inherit-mode.t
267 268 test-init.t
268 269 test-issue1089.t
269 270 test-issue1102.t
270 271 test-issue1175.t
271 272 test-issue1306.t
272 273 test-issue1438.t
273 274 test-issue1502.t
274 275 test-issue1802.t
275 276 test-issue1877.t
276 277 test-issue1993.t
277 278 test-issue2137.t
278 279 test-issue3084.t
279 280 test-issue4074.t
280 281 test-issue522.t
281 282 test-issue586.t
282 283 test-issue5979.t
283 284 test-issue612.t
284 285 test-issue619.t
285 286 test-issue660.t
286 287 test-issue672.t
287 288 test-issue842.t
288 289 test-journal-exists.t
289 290 test-journal-share.t
290 291 test-journal.t
291 292 test-known.t
292 293 test-largefiles-cache.t
293 294 test-largefiles-misc.t
294 295 test-largefiles-small-disk.t
295 296 test-largefiles-update.t
296 297 test-largefiles.t
297 298 test-lfs-largefiles.t
298 299 test-lfs-pointer.py
299 300 test-linelog.py
300 301 test-linerange.py
301 302 test-locate.t
302 303 test-lock-badness.t
303 304 test-log-linerange.t
304 305 test-log.t
305 306 test-logexchange.t
306 307 test-lrucachedict.py
307 308 test-mactext.t
308 309 test-mailmap.t
309 310 test-manifest-merging.t
310 311 test-manifest.py
311 312 test-manifest.t
312 313 test-match.py
313 314 test-mdiff.py
314 315 test-merge-changedelete.t
315 316 test-merge-closedheads.t
316 317 test-merge-commit.t
317 318 test-merge-criss-cross.t
318 319 test-merge-default.t
319 320 test-merge-force.t
320 321 test-merge-halt.t
321 322 test-merge-internal-tools-pattern.t
322 323 test-merge-local.t
323 324 test-merge-no-file-change.t
324 325 test-merge-remove.t
325 326 test-merge-revert.t
326 327 test-merge-revert2.t
327 328 test-merge-subrepos.t
328 329 test-merge-symlinks.t
329 330 test-merge-tools.t
330 331 test-merge-types.t
331 332 test-merge1.t
332 333 test-merge10.t
333 334 test-merge2.t
334 335 test-merge4.t
335 336 test-merge5.t
336 337 test-merge6.t
337 338 test-merge7.t
338 339 test-merge8.t
339 340 test-merge9.t
340 341 test-minifileset.py
341 342 test-minirst.py
342 343 test-mq-git.t
343 344 test-mq-guards.t
344 345 test-mq-header-date.t
345 346 test-mq-header-from.t
346 347 test-mq-merge.t
347 348 test-mq-pull-from-bundle.t
348 349 test-mq-qclone-http.t
349 350 test-mq-qdelete.t
350 351 test-mq-qdiff.t
351 352 test-mq-qfold.t
352 353 test-mq-qgoto.t
353 354 test-mq-qimport-fail-cleanup.t
354 355 test-mq-qnew.t
355 356 test-mq-qpush-exact.t
356 357 test-mq-qpush-fail.t
357 358 test-mq-qqueue.t
358 359 test-mq-qrefresh-interactive.t
359 360 test-mq-qrefresh-replace-log-message.t
360 361 test-mq-qrefresh.t
361 362 test-mq-qrename.t
362 363 test-mq-qsave.t
363 364 test-mq-safety.t
364 365 test-mq-subrepo.t
365 366 test-mq-symlinks.t
366 367 test-mq.t
367 368 test-mv-cp-st-diff.t
368 369 test-narrow-acl.t
369 370 test-narrow-archive.t
370 371 test-narrow-clone-no-ellipsis.t
371 372 test-narrow-clone-non-narrow-server.t
372 373 test-narrow-clone-nonlinear.t
373 374 test-narrow-clone.t
374 375 test-narrow-commit.t
375 376 test-narrow-copies.t
376 377 test-narrow-debugcommands.t
377 378 test-narrow-debugrebuilddirstate.t
378 379 test-narrow-exchange-merges.t
379 380 test-narrow-exchange.t
380 381 test-narrow-expanddirstate.t
381 382 test-narrow-merge.t
382 383 test-narrow-patch.t
383 384 test-narrow-patterns.t
384 385 test-narrow-pull.t
385 386 test-narrow-rebase.t
386 387 test-narrow-shallow-merges.t
387 388 test-narrow-shallow.t
388 389 test-narrow-strip.t
389 390 test-narrow-trackedcmd.t
390 391 test-narrow-update.t
391 392 test-narrow-widen-no-ellipsis.t
392 393 test-narrow-widen.t
393 394 test-narrow.t
394 395 test-nested-repo.t
395 396 test-newbranch.t
396 397 test-newercgi.t
397 398 test-nointerrupt.t
398 399 test-obshistory.t
399 400 test-obsmarker-template.t
400 401 test-obsmarkers-effectflag.t
401 402 test-obsolete-bounds-checking.t
402 403 test-obsolete-bundle-strip.t
403 404 test-obsolete-changeset-exchange.t
404 405 test-obsolete-checkheads.t
405 406 test-obsolete-distributed.t
406 407 test-obsolete-divergent.t
407 408 test-obsolete-tag-cache.t
408 409 test-obsolete.t
409 410 test-origbackup-conflict.t
410 411 test-pager-legacy.t
411 412 test-pager.t
412 413 test-parents.t
413 414 test-parse-date.t
414 415 test-parseindex2.py
415 416 test-patch-offset.t
416 417 test-patch.t
417 418 test-patchbomb-bookmark.t
418 419 test-patchbomb-tls.t
419 420 test-patchbomb.t
420 421 test-pathconflicts-basic.t
421 422 test-pathconflicts-merge.t
422 423 test-pathconflicts-update.t
423 424 test-pathencode.py
424 425 test-pending.t
425 426 test-permissions.t
426 427 test-phases-exchange.t
427 428 test-phases.t
428 429 test-profile.t
429 430 test-progress.t
430 431 test-pull-branch.t
431 432 test-pull-http.t
432 433 test-pull-permission.t
433 434 test-pull-pull-corruption.t
434 435 test-pull-r.t
435 436 test-pull-update.t
436 437 test-pull.t
437 438 test-purge.t
438 439 test-push-cgi.t
439 440 test-push-checkheads-partial-C1.t
440 441 test-push-checkheads-partial-C2.t
441 442 test-push-checkheads-partial-C3.t
442 443 test-push-checkheads-partial-C4.t
443 444 test-push-checkheads-pruned-B1.t
444 445 test-push-checkheads-pruned-B2.t
445 446 test-push-checkheads-pruned-B3.t
446 447 test-push-checkheads-pruned-B4.t
447 448 test-push-checkheads-pruned-B5.t
448 449 test-push-checkheads-pruned-B6.t
449 450 test-push-checkheads-pruned-B7.t
450 451 test-push-checkheads-pruned-B8.t
451 452 test-push-checkheads-superceed-A1.t
452 453 test-push-checkheads-superceed-A2.t
453 454 test-push-checkheads-superceed-A3.t
454 455 test-push-checkheads-superceed-A4.t
455 456 test-push-checkheads-superceed-A5.t
456 457 test-push-checkheads-superceed-A6.t
457 458 test-push-checkheads-superceed-A7.t
458 459 test-push-checkheads-superceed-A8.t
459 460 test-push-checkheads-unpushed-D1.t
460 461 test-push-checkheads-unpushed-D2.t
461 462 test-push-checkheads-unpushed-D3.t
462 463 test-push-checkheads-unpushed-D4.t
463 464 test-push-checkheads-unpushed-D5.t
464 465 test-push-checkheads-unpushed-D6.t
465 466 test-push-checkheads-unpushed-D7.t
466 467 test-push-http.t
467 468 test-push-warn.t
468 469 test-push.t
469 470 test-pushvars.t
470 471 test-qrecord.t
471 472 test-rebase-abort.t
472 473 test-rebase-backup.t
473 474 test-rebase-base-flag.t
474 475 test-rebase-bookmarks.t
475 476 test-rebase-brute-force.t
476 477 test-rebase-cache.t
477 478 test-rebase-check-restore.t
478 479 test-rebase-collapse.t
479 480 test-rebase-conflicts.t
480 481 test-rebase-dest.t
481 482 test-rebase-detach.t
482 483 test-rebase-emptycommit.t
483 484 test-rebase-inmemory.t
484 485 test-rebase-interruptions.t
485 486 test-rebase-issue-noparam-single-rev.t
486 487 test-rebase-legacy.t
487 488 test-rebase-mq-skip.t
488 489 test-rebase-mq.t
489 490 test-rebase-named-branches.t
490 491 test-rebase-newancestor.t
491 492 test-rebase-obsolete.t
492 493 test-rebase-parameters.t
493 494 test-rebase-partial.t
494 495 test-rebase-pull.t
495 496 test-rebase-rename.t
496 497 test-rebase-scenario-global.t
497 498 test-rebase-templates.t
498 499 test-rebase-transaction.t
499 500 test-rebuildstate.t
500 501 test-record.t
501 502 test-releasenotes-formatting.t
502 503 test-releasenotes-merging.t
503 504 test-releasenotes-parsing.t
504 505 test-relink.t
505 506 test-remove.t
506 507 test-removeemptydirs.t
507 508 test-rename-after-merge.t
508 509 test-rename-dir-merge.t
509 510 test-rename-merge1.t
510 511 test-rename-merge2.t
511 512 test-rename.t
512 513 test-repair-strip.t
513 514 test-repo-compengines.t
514 515 test-requires.t
515 516 test-resolve.t
516 517 test-revert-flags.t
517 518 test-revert-interactive.t
518 519 test-revert-unknown.t
519 520 test-revert.t
520 521 test-revisions.t
521 522 test-revlog-ancestry.py
522 523 test-revlog-group-emptyiter.t
523 524 test-revlog-mmapindex.t
524 525 test-revlog-packentry.t
525 526 test-revlog-raw.py
526 527 test-revlog-v2.t
527 528 test-revlog.t
528 529 test-revset-dirstate-parents.t
529 530 test-revset-legacy-lookup.t
530 531 test-revset-outgoing.t
531 532 test-rollback.t
532 533 test-run-tests.py
533 534 test-run-tests.t
534 535 test-schemes.t
535 536 test-serve.t
536 537 test-setdiscovery.t
537 538 test-share.t
538 539 test-shelve.t
539 540 test-show-stack.t
540 541 test-show-work.t
541 542 test-show.t
542 543 test-simple-update.t
543 544 test-simplekeyvaluefile.py
544 545 test-simplemerge.py
545 546 test-single-head.t
546 547 test-sparse-clear.t
547 548 test-sparse-clone.t
548 549 test-sparse-import.t
549 550 test-sparse-merges.t
550 551 test-sparse-profiles.t
551 552 test-sparse-requirement.t
552 553 test-sparse-verbose-json.t
553 554 test-sparse.t
554 555 test-split.t
555 556 test-ssh-bundle1.t
556 557 test-ssh-clone-r.t
557 558 test-ssh-proto-unbundle.t
558 559 test-ssh-proto.t
559 560 test-ssh-repoerror.t
560 561 test-ssh.t
561 562 test-sshserver.py
562 563 test-stack.t
563 564 test-status-color.t
564 565 test-status-inprocess.py
565 566 test-status-rev.t
566 567 test-status-terse.t
567 568 test-status.t
568 569 test-storage.py
569 570 test-stream-bundle-v2.t
570 571 test-strict.t
571 572 test-strip-cross.t
572 573 test-strip.t
573 574 test-subrepo-deep-nested-change.t
574 575 test-subrepo-missing.t
575 576 test-subrepo-paths.t
576 577 test-subrepo-recursion.t
577 578 test-subrepo-relative-path.t
578 579 test-subrepo.t
579 580 test-symlink-os-yes-fs-no.py
580 581 test-symlink-placeholder.t
581 582 test-symlinks.t
582 583 test-tag.t
583 584 test-tags.t
584 585 test-template-basic.t
585 586 test-template-functions.t
586 587 test-template-keywords.t
587 588 test-template-map.t
588 589 test-tools.t
589 590 test-transplant.t
590 591 test-treemanifest.t
591 592 test-ui-color.py
592 593 test-ui-config.py
593 594 test-ui-verbosity.py
594 595 test-unamend.t
595 596 test-unbundlehash.t
596 597 test-uncommit.t
597 598 test-unified-test.t
598 599 test-unionrepo.t
599 600 test-unrelated-pull.t
600 601 test-up-local-change.t
601 602 test-update-branches.t
602 603 test-update-dest.t
603 604 test-update-issue1456.t
604 605 test-update-names.t
605 606 test-update-reverse.t
606 607 test-upgrade-repo.t
607 608 test-url-download.t
608 609 test-url-rev.t
609 610 test-url.py
610 611 test-username-newline.t
611 612 test-util.py
612 613 test-verify.t
613 614 test-walk.t
614 615 test-walkrepo.py
615 616 test-websub.t
616 617 test-win32text.t
617 618 test-wireproto-clientreactor.py
618 619 test-wireproto-command-branchmap.t
619 620 test-wireproto-command-changesetdata.t
620 621 test-wireproto-command-filedata.t
621 622 test-wireproto-command-filesdata.t
622 623 test-wireproto-command-heads.t
623 624 test-wireproto-command-listkeys.t
624 625 test-wireproto-command-lookup.t
625 626 test-wireproto-command-manifestdata.t
626 627 test-wireproto-command-pushkey.t
627 628 test-wireproto-framing.py
628 629 test-wireproto-serverreactor.py
629 630 test-wireproto.py
630 631 test-wsgirequest.py
631 632 test-xdg.t
@@ -1,2869 +1,2869
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import, print_function
10 10
11 11 import collections
12 12 import contextlib
13 13 import copy
14 14 import email
15 15 import errno
16 16 import hashlib
17 17 import os
18 18 import posixpath
19 19 import re
20 20 import shutil
21 21 import zlib
22 22
23 23 from .i18n import _
24 24 from .node import (
25 25 hex,
26 26 short,
27 27 )
28 28 from . import (
29 29 copies,
30 30 diffhelper,
31 31 diffutil,
32 32 encoding,
33 33 error,
34 34 mail,
35 35 mdiff,
36 36 pathutil,
37 37 pycompat,
38 38 scmutil,
39 39 similar,
40 40 util,
41 41 vfs as vfsmod,
42 42 )
43 43 from .utils import (
44 44 dateutil,
45 45 procutil,
46 46 stringutil,
47 47 )
48 48
# Alias of util.stringio.
stringio = util.stringio

# Matches the "diff --git a/<old> b/<new>" line that opens a git-style
# patch section; groups 1 and 2 capture the a/ and b/ paths.
gitre = re.compile(br'diff --git a/(.*) b/(.*)')
# Splits a byte string into runs of tabs and runs of non-tabs.
tabsplitter = re.compile(br'(\t+|[^\t]+)')
# Splits a byte string into tab runs, space runs, "word" runs
# (alphanumerics, underscore and high bytes), or single other bytes.
wordsplitter = re.compile(br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|'
                          b'[^ \ta-zA-Z0-9_\x80-\xff])')

# Alias of error.PatchError.
PatchError = error.PatchError

# public functions
59 59
def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        # Heuristic for "Name: value" mail-style header lines;
        # continuation lines (leading whitespace) only count while we
        # are already inside a header block.
        if inheader and line.startswith((' ', '\t')):
            # continuation
            return True
        if line.startswith((' ', '-', '+')):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        # Re-wrap accumulated lines as a file-like object.
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        # Split a stream of "hg export" style patches on the
        # '# HG changeset patch' marker that begins each one.
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # Split an mbox on its 'From ' separator lines, recursing into
        # split() for each message body (minus the 'From ' line itself).
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # Let the email parser handle MIME input and yield each part
        # whose content type may carry a patch.
        def msgfp(m):
            # Flatten a message object back into a file-like object.
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = mail.parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # Split on mail-style header blocks when no MIME headers were
        # seen before the diff content started.
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # Fallback: treat the entire buffered input as one patch.
        yield chunk(cur)

    class fiter(object):
        # Adapter providing an iterator interface on top of objects
        # that only expose readline().
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

        __next__ = next

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # Read ahead until we can tell which flavour of input this is, then
    # hand the stream plus the buffered prefix to the right splitter.
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
188 188
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
#
# Lines of the form "# <header> <value>" in an hg patch header are
# stored under the matching key in the dictionary built by _extract().
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]
195 195
@contextlib.contextmanager
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    The patch may be a plain patch or wrapped in an email message.

    Yields a dictionary.  Standard keys are:
    - filename,
    - message,
    - user,
    - date,
    - branch,
    - node,
    - p1,
    - p2.
    Any of these may be absent.  A missing filename means fileobj held
    no patch.  The temporary patch file named by filename is removed
    when the context exits.'''

    fdesc, patchpath = pycompat.mkstemp(prefix='hg-patch-')
    patchfile = os.fdopen(fdesc, r'wb')
    try:
        yield _extract(ui, fileobj, patchpath, patchfile)
    finally:
        # Always clean up the temporary file, even on error.
        patchfile.close()
        os.unlink(patchpath)
221 221
def _extract(ui, fileobj, tmpname, tmpfp):
    """Parse fileobj as a patch, possibly wrapped in an email message.

    Diff content is written to the open file tmpfp (path tmpname);
    metadata (user, message, parents, ...) is collected into the
    returned dictionary.  See extract() for the dictionary keys.
    """

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
                        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        br'---[ \t].*?^\+\+\+[ \t]|'
                        br'\*\*\*[ \t].*?^---[ \t])',
                        re.MULTILINE | re.DOTALL)

    data = {}

    msg = mail.parse(fileobj)

    # header lookups use native str keys (r'') for the email module
    subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
    data['user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
    if not subject and not data['user']:
        # Not an email, restore parsed headers if any
        subject = '\n'.join(': '.join(map(encoding.strtolocal, h))
                            for h in msg.items()) + '\n'

    # should try to parse msg['Date']
    parents = []

    if subject:
        # strip a leading "[PATCH n/m]"-style tag from the subject
        if subject.startswith('[PATCH'):
            pend = subject.find(']')
            if pend >= 0:
                subject = subject[pend + 1:].lstrip()
        subject = re.sub(br'\n[ \t]+', ' ', subject)
        ui.debug('Subject: %s\n' % subject)
    if data['user']:
        ui.debug('From: %s\n' % data['user'])
    diffs_seen = 0
    ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
    message = ''
    for part in msg.walk():
        content_type = pycompat.bytestr(part.get_content_type())
        ui.debug('Content-Type: %s\n' % content_type)
        if content_type not in ok_types:
            continue
        payload = part.get_payload(decode=True)
        m = diffre.search(payload)
        if m:
            # This part contains a diff: everything before the diff is
            # commit-message material, everything from the diff on goes
            # to the temporary patch file.
            hgpatch = False
            hgpatchheader = False
            ignoretext = False

            ui.debug('found patch at byte %d\n' % m.start(0))
            diffs_seen += 1
            cfp = stringio()
            for line in payload[:m.start(0)].splitlines():
                if line.startswith('# HG changeset patch') and not hgpatch:
                    ui.debug('patch generated by hg export\n')
                    hgpatch = True
                    hgpatchheader = True
                    # drop earlier commit message content
                    cfp.seek(0)
                    cfp.truncate()
                    subject = None
                elif hgpatchheader:
                    if line.startswith('# User '):
                        data['user'] = line[7:]
                        ui.debug('From: %s\n' % data['user'])
                    elif line.startswith("# Parent "):
                        parents.append(line[9:].lstrip())
                    elif line.startswith("# "):
                        # extensible headers, see patchheadermap
                        for header, key in patchheadermap:
                            prefix = '# %s ' % header
                            if line.startswith(prefix):
                                data[key] = line[len(prefix):]
                    else:
                        hgpatchheader = False
                elif line == '---':
                    # git/patchbomb style message/diffstat separator
                    ignoretext = True
                if not hgpatchheader and not ignoretext:
                    cfp.write(line)
                    cfp.write('\n')
            message = cfp.getvalue()
            if tmpfp:
                tmpfp.write(payload)
                if not payload.endswith('\n'):
                    tmpfp.write('\n')
        elif not diffs_seen and message and content_type == 'text/plain':
            # plain-text part before any diff: append to the message
            message += '\n' + payload

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname

    return data
321 321
class patchmeta(object):
    """Metadata describing a single patched file.

    'op' records the operation performed: ADD, DELETE, RENAME, MODIFY
    or COPY.  'path' is the patched file path, and 'oldpath' holds the
    source path for COPY/RENAME operations (None otherwise).  When the
    file mode changes, 'mode' is an (islink, isexec) pair; it stays
    None when the mode is untouched.  'binary' flags git binary
    patches.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # store the masked symlink and exec permission bits
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        dup = patchmeta(self.path)
        dup.oldpath = self.oldpath
        dup.mode = self.mode
        dup.op = self.op
        dup.binary = self.binary
        return dup

    def _ispatchinga(self, afile):
        # a '/dev/null' source means the patch creates the file
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        # a '/dev/null' destination means the patch deletes the file
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        # True when this metadata matches the given a/ and b/ diff paths
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
367 367
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Scan the stream for git extended headers, accumulating one
    # patchmeta per "diff --git" section.
    gitpatches = []
    current = None
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if current:
                    gitpatches.append(current)
                current = patchmeta(m.group(2))
        elif current:
            if line.startswith('--- '):
                # start of the diff body: this file's metadata is done
                gitpatches.append(current)
                current = None
                continue
            if line.startswith('rename from '):
                current.op = 'RENAME'
                current.oldpath = line[12:]
            elif line.startswith('rename to '):
                current.path = line[10:]
            elif line.startswith('copy from '):
                current.op = 'COPY'
                current.oldpath = line[10:]
            elif line.startswith('copy to '):
                current.path = line[8:]
            elif line.startswith('deleted file'):
                current.op = 'DELETE'
            elif line.startswith('new file mode '):
                current.op = 'ADD'
                current.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                current.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                current.binary = True
    if current:
        gitpatches.append(current)

    return gitpatches
411 411
class linereader(object):
    """File-like wrapper that lets callers push lines back onto the
    input stream; pushed-back lines are returned before new reads."""
    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        # None is accepted and ignored, so callers can push a possibly
        # absent line unconditionally.
        if line is not None:
            self.buf.append(line)

    def readline(self):
        # drain pushed-back lines first, then fall through to the file
        if self.buf:
            return self.buf.pop(0)
        return self.fp.readline()

    def __iter__(self):
        # iterate until readline() returns the empty-string sentinel
        return iter(self.readline, '')
431 431
class abstractbackend(object):
    """Interface for the destinations patches are applied to."""

    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return a (data, (islink, isexec)) tuple for the target file.

        data is None when the file is missing or deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to the target file fname and set its mode.

        mode is an (islink, isexec) tuple.  A data of None leaves the
        file content unchanged.  copysource names the original file
        when the target was modified after being copied.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Record rejected lines for fname; failed is the number of
        hunks that did not apply out of total hunks for this file.

        The default implementation discards them.
        """

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError
465 465
class fsbackend(abstractbackend):
    """A patch backend reading and writing files directly under basedir."""
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        """Return (data, (islink, isexec)); (None, None) if missing."""
        if self.opener.islink(fname):
            # for symlinks the data is the link target
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            # a missing file is handled by the read() below
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            return None, None

    def setfile(self, fname, data, mode, copysource):
        """Write data with (islink, isexec) mode; None data updates flags only."""
        islink, isexec = mode
        if data is None:
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        # optionally prune parent directories left empty by the removal
        rmdir = self.ui.configbool('experimental', 'removeemptydirs')
        self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)

    def writerej(self, fname, failed, total, lines):
        """Save rejected hunks to '<fname>.rej' and warn the user."""
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
515 515
class workingbackend(fsbackend):
    """A patch backend applying to the working directory and recording
    the resulting adds, removes and copies in the dirstate on close().
    """
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        # similarity threshold forwarded to scmutil.marktouched() in close()
        self.similarity = similarity
        self.removed = set()    # files unlinked while patching
        self.changed = set()    # every file touched while patching
        self.copied = []        # (copysource, destination) pairs

    def _checkknown(self, fname):
        """Abort if fname exists in the working dir but is untracked."""
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        """Flush copy/remove bookkeeping to the dirstate and return the
        sorted list of files touched by the patch."""
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
        for f in self.removed:
            if f not in self.repo.dirstate:
                # File was deleted and no longer belongs to the
                # dirstate, it was probably marked added then
                # deleted, and should not be considered by
                # marktouched().
                changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
559 559
class filestore(object):
    """Hold patched file contents, in memory up to maxsize bytes in
    total and spilling further files to a temporary directory on disk.
    """
    def __init__(self, maxsize=None):
        self.opener = None   # lazily-created vfs over the spill directory
        self.files = {}      # fname -> (spill name, mode, copied)
        self.created = 0     # counter used to name spilled files
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0        # bytes currently held in memory
        self.data = {}       # fname -> (data, mode, copied)

    def setfile(self, fname, data, mode, copied=None):
        # negative maxsize means "never spill"
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
            return
        if self.opener is None:
            root = pycompat.mkdtemp(prefix='hg-patch-')
            self.opener = vfsmod.vfs(root)
        # Avoid filename issues with these simple names
        fn = '%d' % self.created
        self.opener.write(fn, data)
        self.created += 1
        self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        """Return (data, mode, copied); (None, None, None) if unknown."""
        try:
            return self.data[fname]
        except KeyError:
            pass
        if self.opener is None or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        if self.opener is not None:
            shutil.rmtree(self.opener.base)
596 596
class repobackend(abstractbackend):
    """A patch backend reading pre-patch file data from ctx and
    collecting patched results in store, without touching the working
    directory.
    """
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx        # source of pre-patch file data
        self.store = store    # receives patched file data
        self.changed = set()
        self.removed = set()
        self.copied = {}      # destination -> copysource

    def _checkknown(self, fname):
        # only files present in ctx can be patched
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        """Return (data, (islink, isexec)); (None, None) if absent in ctx."""
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # flags-only change: keep the pre-patch content
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        """Return the set of files touched by this patching session."""
        return self.changed | self.removed
638 638
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# Raw strings keep the '\d'/'\*' escapes literal; in a non-raw string
# they are invalid escape sequences (DeprecationWarning on Python 3.6+,
# slated to become a SyntaxError).
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# supported end-of-line normalization modes for patched files
eolmodes = ['strict', 'crlf', 'lf', 'auto']
643 643
class patchfile(object):
    """State and logic for patching one file.

    Loads the target (or its copy source) through backend/store, applies
    hunks one by one with optional fuzzing, and flushes the result (and
    any rejected hunks) back through the backend on close().
    """
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        # gp carries per-file patch metadata (path, op, mode, oldpath)
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        self.hash = {}          # line -> [indexes], built lazily in apply()
        self.dirty = 0
        self.offset = 0         # cumulative line-count drift from applied hunks
        self.skew = 0           # drift observed between expected/actual position
        self.rej = []           # hunks that failed to apply
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write lines via the backend, converting '\n' endings to the
        configured EOL unless in strict mode."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l.endswith('\n'):
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Emit the 'patching file' message at most once; warn forces it."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply hunk h to self.lines.

        Returns 0 for a clean apply, the fuzz level for a fuzzy apply,
        or -1 when the hunk was rejected (queued on self.rej).
        """
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in pycompat.xrange(self.ui.configint("patch", "fuzz") + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelper.testhunk(old, self.lines, l):
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush pending changes and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
859 859
class header(object):
    """The file-level header of a patch, with its hunks attached."""
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        # a git 'index' line marks an all-or-nothing binary diff
        return any(line.startswith('index ') for line in self.header)

    def pretty(self, fp):
        for line in self.header:
            if line.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(line):
                fp.write(line)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if line.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum(max(h.added, h.removed) for h in self.hunks)))
                break
            fp.write(line)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        # True when the hunks cannot be applied selectively
        for line in self.header:
            if self.allhunks_re.match(line):
                return True
        return False

    def files(self):
        match = self.diffgit_re.match(self.header[0])
        if not match:
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = match.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        return any(self.newfile_re.match(line) for line in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level; for example a deleted file is a special file. The user cannot
        # change the content of the operation: a deletion is taken or not
        # taken as a whole. Newly added files are special only when empty --
        # with content, the content remains editable.
        nocontent = len(self.header) == 2
        if self.isnewfile() and nocontent:
            return True
        return any(self.special_re.match(line) for line in self.header)
931 931
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(self, header, fromline, toline, proc, before, hunk, after,
                 maxcontext=None):
        def trimcontext(lines, reverse=False):
            # Drop context lines beyond maxcontext and report how many
            # were dropped, so the hunk start can be adjusted.
            if maxcontext is None:
                return 0, lines
            surplus = len(lines) - maxcontext
            if surplus <= 0:
                return 0, lines
            if reverse:
                return surplus, lines[surplus:]
            return surplus, lines[:maxcontext]

        self.header = header
        trimmed, self.before = trimcontext(before, True)
        self.fromline = fromline + trimmed
        self.toline = toline + trimmed
        _unused, self.after = trimcontext(after, False)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False
        return (v.hunk == self.hunk
                and v.proc == self.proc
                and self.fromline == v.fromline
                and self.header.files() == v.header.files())

    def __hash__(self):
        return hash((tuple(self.hunk), tuple(self.header.files()),
                     self.fromline, self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = sum(1 for line in hunk if line.startswith('+'))
        rem = sum(1 for line in hunk if line.startswith('-'))
        return add, rem

    def reversehunk(self):
        """return another recordhunk which is the reverse of the hunk

        If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
        that, swap fromline/toline and +/- signs while keep other things
        unchanged.
        """
        flip = {'+': '-', '-': '+', '\\': '\\'}
        swapped = ['%s%s' % (flip[line[0:1]], line[1:]) for line in self.hunk]
        return recordhunk(self.header, self.toline, self.fromline, self.proc,
                          self.before, swapped, self.after)

    def write(self, fp):
        # context lines count toward both sides, except for a trailing
        # "no newline" marker which is not a real line on either side
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        suffix = self.proc and (' ' + self.proc)
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, delta + self.removed,
                  self.toline, delta + self.added, suffix))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
1010 1010
def getmessages():
    """Return the prompt strings used by interactive patch filtering.

    The outer keys are contexts: 'multiple' and 'single' hold the
    per-hunk questions (for several hunks vs a single one), while
    'help' holds the promptchoice() strings listing every answer.
    The inner keys are the operations: 'apply', 'discard', 'record'.
    """
    return {
        'multiple': {
            'apply': _("apply change %d/%d to '%s'?"),
            'discard': _("discard change %d/%d to '%s'?"),
            'record': _("record change %d/%d to '%s'?"),
        },
        'single': {
            'apply': _("apply this change to '%s'?"),
            'discard': _("discard this change to '%s'?"),
            'record': _("record this change to '%s'?"),
        },
        'help': {
            'apply': _('[Ynesfdaq?]'
                       '$$ &Yes, apply this change'
                       '$$ &No, skip this change'
                       '$$ &Edit this change manually'
                       '$$ &Skip remaining changes to this file'
                       '$$ Apply remaining changes to this &file'
                       '$$ &Done, skip remaining changes and files'
                       '$$ Apply &all changes to all remaining files'
                       '$$ &Quit, applying no changes'
                       '$$ &? (display help)'),
            'discard': _('[Ynesfdaq?]'
                         '$$ &Yes, discard this change'
                         '$$ &No, skip this change'
                         '$$ &Edit this change manually'
                         '$$ &Skip remaining changes to this file'
                         '$$ Discard remaining changes to this &file'
                         '$$ &Done, skip remaining changes and files'
                         '$$ Discard &all changes to all remaining files'
                         '$$ &Quit, discarding no changes'
                         '$$ &? (display help)'),
            'record': _('[Ynesfdaq?]'
                        '$$ &Yes, record this change'
                        '$$ &No, skip this change'
                        '$$ &Edit this change manually'
                        '$$ &Skip remaining changes to this file'
                        '$$ Record remaining changes to this &file'
                        '$$ &Done, skip remaining changes and files'
                        '$$ Record &all changes to all remaining files'
                        '$$ &Quit, recording no changes'
                        '$$ &? (display help)'),
        }
    }
1056 1056
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks

    headers is a list of header objects with their hunks attached.
    operation selects the prompt wording ('record' by default).
    Returns a (chunks, opts) tuple where chunks holds the selected
    headers/hunks and opts is a (currently empty) options dict.
    """
    messages = getmessages()

    if operation is None:
        operation = 'record'

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = messages['help'][operation]
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = pycompat.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff")
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()},
                                    blockedtag='filterpatch')
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn, r'rb')
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        line = util.fromnativeeol(line)
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {}        # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
            msg = (_('examine changes to %s?') %
                   _(' and ').join("'%s'" % f for f in h.files()))
            r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
            if not r:
                continue
        applied[h.filename()] = [h]
        if h.allhunks():
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages['single'][operation] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages['multiple'][operation] % (idx, total,
                                                         chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                    skipall, msg, chunk)
            if r:
                if fixoffset:
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    # dict.itervalues() does not exist on Python 3; values() behaves
    # identically for this use on both major versions.
    return (sum([h for h in applied.values()
                 if h[0].special() or len(h) > 1], []), {})
1208 1208 class hunk(object):
1209 1209 def __init__(self, desc, num, lr, context):
1210 1210 self.number = num
1211 1211 self.desc = desc
1212 1212 self.hunk = [desc]
1213 1213 self.a = []
1214 1214 self.b = []
1215 1215 self.starta = self.lena = None
1216 1216 self.startb = self.lenb = None
1217 1217 if lr is not None:
1218 1218 if context:
1219 1219 self.read_context_hunk(lr)
1220 1220 else:
1221 1221 self.read_unified_hunk(lr)
1222 1222
1223 1223 def getnormalized(self):
1224 1224 """Return a copy with line endings normalized to LF."""
1225 1225
1226 1226 def normalize(lines):
1227 1227 nlines = []
1228 1228 for line in lines:
1229 1229 if line.endswith('\r\n'):
1230 1230 line = line[:-2] + '\n'
1231 1231 nlines.append(line)
1232 1232 return nlines
1233 1233
1234 1234 # Dummy object, it is rebuilt manually
1235 1235 nh = hunk(self.desc, self.number, None, None)
1236 1236 nh.number = self.number
1237 1237 nh.desc = self.desc
1238 1238 nh.hunk = self.hunk
1239 1239 nh.a = normalize(self.a)
1240 1240 nh.b = normalize(self.b)
1241 1241 nh.starta = self.starta
1242 1242 nh.startb = self.startb
1243 1243 nh.lena = self.lena
1244 1244 nh.lenb = self.lenb
1245 1245 return nh
1246 1246
    def read_unified_hunk(self, lr):
        """Parse a unified-format hunk body from linereader lr.

        Fills starta/lena/startb/lenb from the '@@' header already held
        in self.desc and populates self.a/self.b/self.hunk from the
        following lines. Raises PatchError on malformed input.
        """
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # an omitted length means a one-line range
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        try:
            diffhelper.addlines(lr, self.hunk, self.lena, self.lenb,
                                self.a, self.b)
        except error.ParseError as e:
            raise PatchError(_("bad hunk #%d: %s") % (self.number, e))
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)
1276 1276
    def read_context_hunk(self, lr):
        """Parse a context-format ('***'/'---') hunk body from lr and
        rewrite self.desc/self.hunk into unified form.

        Reads the old block first, then the new block, merging the new
        lines into self.hunk at the right positions. Raises PatchError
        on malformed input.
        """
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # old side: '- ' removals and '! ' changes become '-' lines,
        # plain context becomes ' ' lines
        for x in pycompat.xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith(' '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': strip the newline we stored
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # new side: '+ ' additions and '! ' changes become '+' lines
        for x in pycompat.xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith(' '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # merge u into self.hunk after any '-' lines already there
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)
1379 1379
1380 1380 def _fixnewline(self, lr):
1381 1381 l = lr.readline()
1382 1382 if l.startswith('\ '):
1383 1383 diffhelper.fixnewline(self.hunk, self.a, self.b)
1384 1384 else:
1385 1385 lr.push(l)
1386 1386
1387 1387 def complete(self):
1388 1388 return len(self.a) == self.lena and len(self.b) == self.lenb
1389 1389
    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        #
        # Returns (trimmed-old, trimmed-new, top) where 'top' is how many
        # context lines were dropped from the front of both sides.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in pycompat.xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1].startswith(' '):
                    top += 1
                else:
                    break
            if not toponly:
                # count trailing context lines, scanning from the bottom
                for x in pycompat.xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1].startswith(' '):
                        bot += 1
                    else:
                        break

            # never trim more than the fuzz budget allows
            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0
1416 1416
1417 1417 def fuzzit(self, fuzz, toponly):
1418 1418 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1419 1419 oldstart = self.starta + top
1420 1420 newstart = self.startb + top
1421 1421 # zero length hunk ranges already have their start decremented
1422 1422 if self.lena and oldstart > 0:
1423 1423 oldstart -= 1
1424 1424 if self.lenb and newstart > 0:
1425 1425 newstart -= 1
1426 1426 return old, oldstart, new, newstart
1427 1427
class binhunk(object):
    """A 'GIT binary patch' hunk: base85-encoded, zlib-compressed data,
    either a full literal or a delta against the previous contents."""
    def __init__(self, lr, fname):
        self.text = None       # decoded payload; None until _read succeeds
        self.delta = False     # True when the payload is a binary delta
        self.hunk = ['GIT binary patch\n']  # raw hunk lines, for re-display
        self._fname = fname
        self._read(lr)

    def complete(self):
        # complete once the payload was fully decoded and length-checked
        return self.text is not None

    def new(self, lines):
        # produce the new file contents; 'lines' is the previous content
        # used as the delta base when this hunk is a delta
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        def getline(lr, hunk):
            # read one raw line, remember it for display, return it
            # stripped of the line terminator
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        # scan for the 'literal <size>' or 'delta <size>' header line
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first char encodes the decoded byte count of the line:
            # 'A'-'Z' -> 1..26, anything else ('a'-'z') -> 27..52
            l = line[0:1]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, stringutil.forcebytestr(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        # the advertised size is a consistency check on the decoded data
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1483 1483
def parsefilename(line):
    """Return the file name from a '--- '/'+++ ' diff header line.

    The first four characters are dropped, trailing CR/LF stripped, and
    the name ends at the first tab if present, else at the first space,
    else at end of line.

    (Renamed the parameter from 'str', which shadowed the builtin.)
    """
    # --- filename \t|space stuff
    s = line[4:].rstrip('\r\n')
    cut = s.find('\t')
    if cut < 0:
        cut = s.find(' ')
        if cut < 0:
            return s
    return s[:cut]
1493 1493
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ... c
    ... 1
    ... 2
    ... + 3
    ... -4
    ... 5
    ... d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...     c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
    c
    1
    2
    @@ -2,6 +1,6 @@
    c
    1
    2
    - 3
    +4
    5
    d
    @@ -6,3 +5,2 @@
    5
    d
    -lastline

    '''

    # chunks that know how to reverse themselves do so; headers and other
    # chunks pass through unchanged
    return [c.reversehunk() if util.safehasattr(c, 'reversehunk') else c
            for c in hunks]
1556 1556
def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ... 1
    ... 2
    ... -3
    ... 4
    ... 5
    ... 6
    ... +6.1
    ... +6.2
    ... 7
    ... 8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
    2
    -3
    4
    @@ -6,2 +5,4 @@
    6
    +6.1
    +6.2
    7
    @@ -8,1 +9,2 @@
    8
    +9
    """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0    # current line in the original file
            self.toline = 0      # current line in the patched file
            self.proc = ''       # section text carried on the @@ line
            self.header = None   # header object for the file being parsed
            self.context = []    # trailing context of the previous hunk
            self.before = []     # leading context of the pending hunk
            self.hunk = []       # +/- lines of the pending hunk
            self.headers = []    # completed headers (the final result)

        def addrange(self, limits):
            # start a new hunk: record the positions from the @@ range
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # flush the pending hunk, if any, using 'context' as its
            # trailing context; keep 'context' around as possible leading
            # context for the next hunk
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                               self.proc, self.before, self.hunk, context,
                               maxcontext)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # the context gathered so far becomes this hunk's leading context
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            # close any pending hunk, then open a new per-file header
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            # flush the final pending hunk and hand back all headers
            self.addcontext([])
            return self.headers

        # state -> {event -> action}; a missing entry means the patch is
        # malformed (handled as PatchError below)
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
        }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            # transitions holds plain functions, so pass the parser
            # instance explicitly
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1680 1680
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b' a/b/c ', 0, b'')
    ('', ' a/b/c')
    >>> pathtransform(b' a/b/c ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b' a//b/c ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        return '', prefix + path.rstrip()
    end = len(path)
    pos = 0
    for remaining in range(strip, 0, -1):
        nxt = path.find('/', pos)
        if nxt == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos = nxt + 1
        # consume '//' in the path
        while pos < end - 1 and path[pos:pos + 1] == '/':
            pos += 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1718 1718
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Synthesize a patchmeta for a plain (non-git) patch hunk.

    Picks which on-disk file the hunk targets from the '---'/'+++'
    names and flags whole-file creations ('ADD') and removals ('DELETE').
    Raises PatchError when neither side names a usable file.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a zero-length range starting at line 0 marks file creation/removal
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back on the raw header names when nothing exists on disk
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1773 1773
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def takewhile(first, pred):
        """collect lines from lr while the predicate holds"""
        taken = [first]
        for cur in iter(lr.readline, ''):
            if not pred(cur):
                lr.push(cur)
                break
            taken.append(cur)
        return taken

    for cur in iter(lr.readline, ''):
        if cur.startswith('diff --git a/') or cur.startswith('diff -r '):
            def notheader(l):
                parts = l.split(None, 1)
                return not parts or parts[0] not in ('---', 'diff')
            hdr = takewhile(cur, notheader)
            fromfile = lr.readline()
            if not fromfile.startswith('---'):
                lr.push(fromfile)
            else:
                hdr += [fromfile, lr.readline()]
            yield 'file', hdr
        elif cur.startswith(' '):
            yield 'context', takewhile(cur,
                                       lambda l: l.startswith((' ', '\\')))
        elif cur.startswith(('-', '+')):
            yield 'hunk', takewhile(cur,
                                    lambda l: l.startswith(('-', '+', '\\')))
        else:
            m = lines_re.match(cur)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', cur
1821 1821
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    try:
        fp = lr.fp
        pos = fp.tell()
    except IOError:
        # stream is not seekable: buffer the remainder in memory so we
        # can rewind after the metadata scan
        fp = stringio(lr.fp.read())
        pos = 0
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the caller can re-read the patch body from the start
    fp.seek(pos)
    return gitpatches
1847 1847
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # None: diff flavor unknown yet; False: unified; True: context diff
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x.startswith('@'))
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of a file: announce the file before the hunk
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # emit hunkless 'file' events for intervening git metadata
            # entries (pure copies/renames/mode changes)
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None,
                               gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # leftover git metadata entries still deserve their 'file' events
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1943 1943
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """
    def deltahead(binchunk):
        # skip a variable-length size header: consume bytes until one
        # with the high bit clear, returning how many were consumed
        i = 0
        for c in pycompat.bytestr(binchunk):
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    out = ""
    # drop the two leading size headers (source size, result size)
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i:i + 1])
        i += 1
        if (cmd & 0x80):
            # copy opcode: low bits select which offset/size bytes follow
            # (little-endian, only the flagged bytes are present)
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i:i + 1])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i:i + 1]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i:i + 1]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i:i + 1]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i:i + 1])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i:i + 1]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i:i + 1]) << 16
                i += 1
            if size == 0:
                # a zero size encodes the maximum copy length
                size = 0x10000
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            # literal insert opcode: cmd is the byte count copied verbatim
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            # opcode 0 is reserved in the delta format
            raise PatchError(_('unexpected delta opcode 0'))
    return out
1999 1999
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # delegate to _applydiff using the standard per-file patcher class
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)
2012 2012
2013 2013 def _canonprefix(repo, prefix):
2014 2014 if prefix:
2015 2015 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2016 2016 if prefix != '':
2017 2017 prefix += '/'
2018 2018 return prefix
2019 2019
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Apply the patch stream from fp, creating one 'patcher' per file.

    Returns 0 on a clean apply, 1 if any hunk applied with fuzz, and -1
    if any hunk was rejected.
    """
    prefix = _canonprefix(backend.repo, prefix)
    def pstrip(p):
        # strip path components and prepend the canonical prefix
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # file selection failed earlier; skip its hunks
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
            current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only entry: no hunks follow for this file
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # stash the pre-image of copy/rename sources so later hunks
            # can read them even after the source has been modified
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2104 2104
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % procutil.shellquote(cwd))
    cmd = ('%s %s -p%d < %s'
           % (patcher, ' '.join(args), strip, procutil.shellquote(patchname)))
    ui.debug('Using external patch tool: %s\n' % cmd)
    fp = procutil.popen(cmd, 'rb')
    # Initialize before the loop: the external tool may print 'with fuzz'
    # or 'FAILED' lines before any 'patching file' line, which previously
    # hit pf/printed_file before assignment (UnboundLocalError).
    pf = ''
    printed_file = False
    try:
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            # refresh dirstate and detect copies for the touched files
            scmutil.marktouched(repo, files, similarity)
        code = fp.close()
        if code:
            raise PatchError(_("patch command failed: %s") %
                             procutil.explainexit(code))
    return fuzz
2148 2148
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Run the patch in *patchobj* through *backend*.

    Returns True when the patch applied with fuzz; raises PatchError when
    it could not be applied. Touched paths are added to *files*."""
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol')
    normalized = eolmode.lower()
    if normalized not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = normalized

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        # not a path: assume an already-open file-like object
        fp = patchobj
    try:
        status = applydiff(ui, fp, backend, store, strip=strip,
                           prefix=prefix, eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if status < 0:
        raise PatchError(_('patch failed to apply'))
    return status > 0
2175 2175
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, prefix, files, eolmode)
2182 2182
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> onto <ctx> through a store-backed repo backend."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, prefix, files, eolmode)
2187 2187
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    externaltool = ui.config('ui', 'patch')
    if externaltool:
        return _externalpatch(ui, repo, externaltool, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
2209 2209
def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
    """Return the set of repo-relative paths touched by the patch file."""
    backend = fsbackend(ui, repo.root)
    prefix = _canonprefix(repo, prefix)
    changed = set()
    with open(patchpath, 'rb') as fp:
        for state, values in iterhunks(fp):
            if state in ('hunk', 'git'):
                continue
            if state != 'file':
                raise error.Abort(_('unsupported parser state: %s') % state)
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
                if gp.oldpath:
                    gp.oldpath = pathtransform(gp.oldpath, strip - 1,
                                               prefix)[1]
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            changed.add(gp.path)
            if gp.op == 'RENAME':
                # a rename touches its source as well
                changed.add(gp.oldpath)
    return changed
2232 2232
class GitDiffRequired(Exception):
    """Signal that a change cannot be expressed in plain diff format.

    (Presumably raised to request an upgrade to git extended diff
    format; the raising sites are outside this chunk -- confirm there.)
    """
    pass
2235 2235
# Re-exported diff option builders from diffutil. Note that both
# 'diffopts' and 'diffallopts' resolve to diffutil.diffallopts
# (compatibility aliasing -- presumably intentional; confirm before
# collapsing the names).
diffopts = diffutil.diffallopts
diffallopts = diffutil.diffallopts
difffeatureopts = diffutil.difffeatureopts
2239 2239
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None,
         hunksfilterfn=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.

    hunksfilterfn, if not None, should be a function taking a filectx and
    hunks generator that may yield filtered hunks.
    '''
    for fctx1, fctx2, hdr, hunks in diffhunks(
            repo, node1=node1, node2=node2,
            match=match, changes=changes, opts=opts,
            losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
    ):
        if hunksfilterfn is not None:
            # If the file has been removed, fctx2 is None; but this should
            # not occur here since we catch removed files early in
            # logcmdutil.getlinerangerevs() for 'hg log -L'.
            assert fctx2 is not None, \
                'fctx2 unexpectly None in diff hunks filtering'
            hunks = hunksfilterfn(fctx2, hunks)
        # flatten (hunkrange, hunklines) pairs into raw patch text
        text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
        # emit the header when there is hunk text, or when it carries
        # metadata beyond the single 'diff' line
        if hdr and (text or len(hdr) > 1):
            yield '\n'.join(hdr) + '\n'
        if text:
            yield text
2286 2286
def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    # default to diffing against the first parent of the working directory
    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU cache of filelogs, so repeated lookups of the same file
        # (e.g. both sides of a modification) do not re-open the filelog
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = ctx1.status(ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        # (use items() rather than iteritems() for py3 compatibility and for
        # consistency with the copy.items() iteration below)
        copy = dict(((dst, src) for (dst, src) in copy.items()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so We don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in list(copy.items()):
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    # warm any remote-file caches for every file the diff will touch
    prefetchmatch = scmutil.matchfiles(
        repo, list(modifiedset | addedset | removedset))
    scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            # some file needed git format; regenerate the whole diff with it
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2403 2403
def diffsinglehunk(hunklines):
    """yield tokens for a list of lines in a single hunk"""
    for rawline in hunklines:
        # peel off the line terminator, then any other trailing whitespace
        noeol = rawline.rstrip('\r\n')
        body = noeol.rstrip()
        # pick the base label from the hunk-line marker
        if rawline.startswith('+'):
            label = 'diff.inserted'
        elif rawline.startswith('-'):
            label = 'diff.deleted'
        else:
            raise error.ProgrammingError('unexpected hunk line: %s' % rawline)
        # split on tab runs so tabs can be highlighted on their own
        for piece in tabsplitter.findall(body):
            kind = 'diff.tab' if piece.startswith('\t') else label
            yield (piece, kind)

        trailing = noeol[len(body):]
        if trailing:
            yield (trailing, 'diff.trailingwhitespace')
        eol = rawline[len(noeol):]
        if eol:
            yield (eol, '')
2427 2427
def diffsinglehunkinline(hunklines):
    """yield tokens for a list of lines in a single hunk, with inline colors

    Unlike diffsinglehunk, the deleted and inserted sides are word-diffed
    against each other so changed words get '.changed' labels and unchanged
    words get '.unchanged' labels.
    """
    # prepare deleted, and inserted content
    a = ''
    b = ''
    for line in hunklines:
        if line[0:1] == '-':
            a += line[1:]
        elif line[0:1] == '+':
            b += line[1:]
        else:
            raise error.ProgrammingError('unexpected hunk line: %s' % line)
    # fast path: if either side is empty, use diffsinglehunk
    if not a or not b:
        for t in diffsinglehunk(hunklines):
            yield t
        return
    # re-split the content into words
    al = wordsplitter.findall(a)
    bl = wordsplitter.findall(b)
    # re-arrange the words to lines since the diff algorithm is line-based
    aln = [s if s == '\n' else s + '\n' for s in al]
    bln = [s if s == '\n' else s + '\n' for s in bl]
    an = ''.join(aln)
    bn = ''.join(bln)
    # run the diff algorithm, prepare atokens and btokens
    atokens = []
    btokens = []
    blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
    for (a1, a2, b1, b2), btype in blocks:
        changed = btype == '!'
        for token in mdiff.splitnewlines(''.join(al[a1:a2])):
            atokens.append((changed, token))
        for token in mdiff.splitnewlines(''.join(bl[b1:b2])):
            btokens.append((changed, token))

    # yield deleted tokens, then inserted ones
    for prefix, label, tokens in [('-', 'diff.deleted', atokens),
                                  ('+', 'diff.inserted', btokens)]:
        nextisnewline = True
        for changed, token in tokens:
            if nextisnewline:
                yield (prefix, label)
                nextisnewline = False
            # special handling line end
            isendofline = token.endswith('\n')
            if isendofline:
                chomp = token[:-1] # chomp
                if chomp.endswith('\r'):
                    chomp = chomp[:-1]
                endofline = token[len(chomp):]
                token = chomp.rstrip() # detect spaces at the end
                endspaces = chomp[len(token):]
            # scan tabs; compare via a 1-byte slice because indexing a bytes
            # object on py3 yields an int, not a bytes of length one
            for maybetab in tabsplitter.findall(token):
                if b'\t' == maybetab[0:1]:
                    currentlabel = 'diff.tab'
                else:
                    if changed:
                        currentlabel = label + '.changed'
                    else:
                        currentlabel = label + '.unchanged'
                yield (maybetab, currentlabel)
            if isendofline:
                if endspaces:
                    yield (endspaces, 'diff.trailingwhitespace')
                yield (endofline, '')
                nextisnewline = True
2496 2496
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # choose the hunk tokenizer: word-level inline coloring when the caller
    # passed diffopts with worddiff set, plain line coloring otherwise
    if kw.get(r'opts') and kw[r'opts'].worddiff:
        dodiffhunk = diffsinglehunkinline
    else:
        dodiffhunk = diffsinglehunk
    # labels for lines inside a file-header section of the diff
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    # labels for lines inside hunk content
    textprefixes = [('@', 'diff.hunk'),
                    # - and + are handled by diffsinglehunk
                    ]
    # True while we are inside a file header (between a 'diff' line and the
    # first '@@' hunk line)
    head = False

    # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
    hunkbuffer = []
    def consumehunkbuffer():
        # flush the buffered hunk lines through the chosen tokenizer
        if hunkbuffer:
            for token in dodiffhunk(hunkbuffer):
                yield token
            hunkbuffer[:] = []

    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        linecount = len(lines)
        for i, line in enumerate(lines):
            # track transitions into/out of the header section
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and not line.startswith((' ', '+', '-', '@', '\\')):
                    head = True
            diffline = False
            if not head and line and line.startswith(('+', '-')):
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            if diffline:
                # buffered: accumulate adjacent +/- lines so the hunk can be
                # tokenized as a unit (needed for word-level diffing)
                bufferedline = line
                if i + 1 < linecount:
                    bufferedline += "\n"
                hunkbuffer.append(bufferedline)
            else:
                # unbuffered: any non-hunk line ends the current hunk
                for token in consumehunkbuffer():
                    yield token
                stripline = line.rstrip()
                for prefix, label in prefixes:
                    if stripline.startswith(prefix):
                        yield (stripline, label)
                        if line != stripline:
                            # emit stripped trailing whitespace separately
                            yield (line[len(stripline):],
                                   'diff.trailingwhitespace')
                        break
                else:
                    yield (line, '')
                if i + 1 < linecount:
                    yield ('\n', '')
    # flush a hunk that runs to the very end of the output
    for token in consumehunkbuffer():
        yield token
2567 2567
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    labeled = difflabel(diff, *args, **kw)
    return labeled
2571 2571
2572 2572 def _filepairs(modified, added, removed, copy, opts):
2573 2573 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2574 2574 before and f2 is the the name after. For added files, f1 will be None,
2575 2575 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2576 2576 or 'rename' (the latter two only if opts.git is set).'''
2577 2577 gone = set()
2578 2578
2579 2579 copyto = dict([(v, k) for k, v in copy.items()])
2580 2580
2581 2581 addedset, removedset = set(added), set(removed)
2582 2582
2583 2583 for f in sorted(modified + added + removed):
2584 2584 copyop = None
2585 2585 f1, f2 = f, f
2586 2586 if f in addedset:
2587 2587 f1 = None
2588 2588 if f in copy:
2589 2589 if opts.git:
2590 2590 f1 = copy[f]
2591 2591 if f1 in removedset and f1 not in gone:
2592 2592 copyop = 'rename'
2593 2593 gone.add(f1)
2594 2594 else:
2595 2595 copyop = 'copy'
2596 2596 elif f in removedset:
2597 2597 f2 = None
2598 2598 if opts.git:
2599 2599 # have we already reported a copy above?
2600 2600 if (f in copyto and copyto[f] in addedset
2601 2601 and copy[copyto[f]] == f):
2602 2602 continue
2603 2603 yield f1, f2, copyop
2604 2604
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    Yields (fctx1, fctx2, header, hunks) tuples, one per changed file.

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        # git-style blob hash: sha1 of "blob <len>\0<content>"
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1('blob %d\0' % l)
        s.update(text)
        return hex(s.digest())

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        # traditional (non-git) "diff -r REV -r REV file" header line
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    def isempty(fctx):
        return fctx is None or fctx.size() == 0

    date1 = dateutil.datestr(ctx1.date())
    date2 = dateutil.datestr(ctx2.date())

    # flag character -> git file mode
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    # developer sanity check: with a relroot, every incoming path must be
    # inside it (see docstring)
    if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
                          or repo.ui.configbool('devel', 'check-relroot')):
        for f in modified + added + removed + list(copy) + list(copy.values()):
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        fctx1 = None
        fctx2 = None
        flag1 = None
        flag2 = None
        if f1:
            fctx1 = getfilectx(f1, ctx1)
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            fctx2 = getfilectx(f2, ctx2)
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        # if binary is True, output "summary" or "base85", but not "text diff"
        if opts.text:
            binary = False
        else:
            binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)

        # a plain (non-git) diff cannot express these situations; let the
        # caller decide whether to upgrade to git format or accept the loss
        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and isempty(fctx2)) or
                # empty file deletion
                (isempty(fctx1) and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        # strip relroot and apply the display prefix (subrepo support)
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else:  # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append('similarity index %d%%' % sim)
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        #  fctx.is  | diffopts                | what to   | is fctx.data()
        #  binary() | text nobinary git index | output?   | outputted?
        # ------------------------------------|----------------------------
        #  yes      | no   no       no  *     | summary   | no
        #  yes      | no   no       yes *     | base85    | yes
        #  yes      | no   yes      no  *     | summary   | no
        #  yes      | no   yes      yes 0     | summary   | no
        #  yes      | no   yes      yes >0    | summary   | semi [1]
        #  yes      | yes  *        *   *     | text diff | yes
        #  no       | *    *        *   *     | text diff | yes
        # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
        if binary and (not opts.git or (opts.git and opts.nobinary and not
                                        opts.index)):
            # fast path: no binary content will be displayed, content1 and
            # content2 are only used for equivalent test. cmp() could have a
            # fast path.
            if fctx1 is not None:
                content1 = b'\0'
            if fctx2 is not None:
                if fctx1 is not None and not fctx1.cmp(fctx2):
                    content2 = b'\0' # not different
                else:
                    content2 = b'\0\0'
        else:
            # normal path: load contents
            if fctx1 is not None:
                content1 = fctx1.data()
            if fctx2 is not None:
                content2 = fctx2.data()

        if binary and opts.git and not opts.nobinary:
            # git binary diff: base85-encoded delta plus index line
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
            hunks = (None, [text]),
        else:
            if opts.git and opts.index > 0:
                # truncated blob hashes, as requested by diff.index config
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append('index %s..%s %s' %
                              (gitindex(content1)[0:opts.index],
                               gitindex(content2)[0:opts.index],
                               gitmode[flag]))

            uheaders, hunks = mdiff.unidiff(content1, date1,
                                            content2, date2,
                                            path1, path2,
                                            binary=binary, opts=opts)
            header.extend(uheaders)
        yield fctx1, fctx2, header, hunks
2763 2763
def diffstatsum(stats):
    """Aggregate (filename, adds, removes, isbinary) tuples.

    Returns (maxnamewidth, maxchangecount, totaladds, totalremoves,
    anybinary)."""
    maxfile = 0
    maxtotal = 0
    addtotal = 0
    removetotal = 0
    binary = False
    for filename, adds, removes, isbinary in stats:
        namewidth = encoding.colwidth(filename)
        if namewidth > maxfile:
            maxfile = namewidth
        if adds + removes > maxtotal:
            maxtotal = adds + removes
        addtotal += adds
        removetotal += removes
        if isbinary:
            binary = True

    return maxfile, maxtotal, addtotal, removetotal, binary
2774 2774
def diffstatdata(lines):
    """Parse diff output lines into (filename, adds, removes, isbinary)
    tuples, one per file appearing in the diff."""
    # raw string: '\s' in a plain literal is an invalid escape sequence
    # (DeprecationWarning on py3.6+, error on newer versions)
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        # flush counters for the file parsed so far, if any
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader is used to track if a line is in the
    # header portion of the diff. This helps properly account
    # for lines that start with '--' or '++'
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # starting a new file diff
            # set numbers to 0 and reset inheader
            inheader = True
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('@@'):
            inheader = False
        elif line.startswith('+') and not inheader:
            adds += 1
        elif line.startswith('-') and not inheader:
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results
2813 2813
def diffstat(lines, width=80):
    """Render a diffstat summary (one bar per file plus a totals line)
    for an iterable of diff output lines."""
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    # reserve room for the change count; 'Bin' needs three columns
    countwidth = max(len(str(maxtotal)), 3 if hasbinary else 0)
    # whatever is left of the requested width holds the +/- bar,
    # but never squeeze it below ten columns
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(count):
        if maxtotal <= graphwidth:
            return count
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(count * graphwidth // maxtotal, int(bool(count)))

    output = []
    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else '%d' % (adds + removes)
        padding = ' ' * (maxname - encoding.colwidth(filename))
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, padding, countwidth, count,
                       '+' * scale(adds), '-' * scale(removes)))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2851 2851
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''
    for line in diffstat(*args, **kw).splitlines():
        # only lines ending in '+' or '-' carry a histogram bar to colorize
        if not line or line[-1] not in '+-':
            yield (line, '')
            continue
        name, graph = line.rsplit(' ', 1)
        yield (name + ' ', '')
        plusrun = re.search(br'\++', graph)
        if plusrun:
            yield (plusrun.group(0), 'diffstat.inserted')
        minusrun = re.search(br'-+', graph)
        if minusrun:
            yield (minusrun.group(0), 'diffstat.deleted')
    yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now