##// END OF EJS Templates
merge with stable
Matt Mackall -
r13104:5dac0d04 merge default
parent child Browse files
Show More
@@ -1,572 +1,578 b''
1 1 # Mercurial extension to provide the 'hg bookmark' command
2 2 #
3 3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''track a line of development with movable markers
9 9
10 10 Bookmarks are local movable markers to changesets. Every bookmark
11 11 points to a changeset identified by its hash. If you commit a
12 12 changeset that is based on a changeset that has a bookmark on it, the
13 13 bookmark shifts to the new changeset.
14 14
15 15 It is possible to use bookmark names in every revision lookup (e.g.
16 16 :hg:`merge`, :hg:`update`).
17 17
18 18 By default, when several bookmarks point to the same changeset, they
19 19 will all move forward together. It is possible to obtain a more
20 20 git-like experience by adding the following configuration option to
21 21 your configuration file::
22 22
23 23 [bookmarks]
24 24 track.current = True
25 25
26 26 This will cause Mercurial to track the bookmark that you are currently
27 27 using, and only update it. This is similar to git's approach to
28 28 branching.
29 29 '''
30 30
31 31 from mercurial.i18n import _
32 32 from mercurial.node import nullid, nullrev, bin, hex, short
33 33 from mercurial import util, commands, repair, extensions, pushkey, hg, url
34 34 from mercurial import revset, encoding
35 35 import os
36 36
def write(repo):
    '''Write bookmarks

    Write the given bookmark => hash dictionary to the .hg/bookmarks file
    in a format equal to those of localtags.

    We also store a backup of the previous state in undo.bookmarks that
    can be copied back on rollback.
    '''
    refs = repo._bookmarks

    # back up the current on-disk state so rollback() can restore it;
    # a missing bookmarks file (fresh repo) is simply skipped
    try:
        bms = repo.opener('bookmarks').read()
    except IOError:
        bms = None
    if bms is not None:
        repo.opener('undo.bookmarks', 'w').write(bms)

    # drop the "current" marker if that bookmark no longer exists
    if repo._bookmarkcurrent not in refs:
        setcurrent(repo, None)
    wlock = repo.wlock()
    try:
        # atomictemp keeps readers from ever seeing a half-written file
        file = repo.opener('bookmarks', 'w', atomictemp=True)
        for refspec, node in refs.iteritems():
            file.write("%s %s\n" % (hex(node), encoding.fromlocal(refspec)))
        file.rename()

        # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
        try:
            os.utime(repo.sjoin('00changelog.i'), None)
        except OSError:
            pass

    finally:
        wlock.release()
def setcurrent(repo, mark):
    '''Set the name of the bookmark that we are currently on

    Set the name of the bookmark that we are on (hg update <bookmark>).
    The name is recorded in .hg/bookmarks.current
    '''
    current = repo._bookmarkcurrent
    if current == mark:
        return

    refs = repo._bookmarks

    # do not update if we do update to a rev equal to the current bookmark
    if (mark and mark not in refs and
        current and refs[current] == repo.changectx('.').node()):
        return
    # an unknown name is recorded as '' (no current bookmark)
    if mark not in refs:
        mark = ''
    wlock = repo.wlock()
    try:
        file = repo.opener('bookmarks.current', 'w', atomictemp=True)
        file.write(mark)
        file.rename()
    finally:
        wlock.release()
    # keep the cached property in sync with what was just written
    repo._bookmarkcurrent = mark
93 99
def bookmark(ui, repo, mark=None, rev=None, force=False, delete=False, rename=None):
    '''track a line of development with movable markers

    Bookmarks are pointers to certain commits that move when
    committing. Bookmarks are local. They can be renamed, copied and
    deleted. It is possible to use bookmark names in :hg:`merge` and
    :hg:`update` to merge and update respectively to a given bookmark.

    You can use :hg:`bookmark NAME` to set a bookmark on the working
    directory's parent revision with the given name. If you specify
    a revision using -r REV (where REV may be an existing bookmark),
    the bookmark is assigned to that revision.

    Bookmarks can be pushed and pulled between repositories (see :hg:`help
    push` and :hg:`help pull`). This requires the bookmark extension to be
    enabled for both the local and remote repositories.
    '''
    hexfn = ui.debugflag and hex or short
    marks = repo._bookmarks
    cur = repo.changectx('.').node()

    # --rename: move an existing bookmark to a new name
    if rename:
        if rename not in marks:
            raise util.Abort(_("a bookmark of this name does not exist"))
        if mark in marks and not force:
            raise util.Abort(_("a bookmark of the same name already exists"))
        if mark is None:
            raise util.Abort(_("new bookmark name required"))
        marks[mark] = marks[rename]
        del marks[rename]
        if repo._bookmarkcurrent == rename:
            setcurrent(repo, mark)
        write(repo)
        return

    # --delete: remove a bookmark, clearing the 'current' marker if needed
    if delete:
        if mark is None:
            raise util.Abort(_("bookmark name required"))
        if mark not in marks:
            raise util.Abort(_("a bookmark of this name does not exist"))
        if mark == repo._bookmarkcurrent:
            setcurrent(repo, None)
        del marks[mark]
        write(repo)
        return

    # NAME given: create a bookmark, or move it with --force
    if mark is not None:
        if "\n" in mark:
            raise util.Abort(_("bookmark name cannot contain newlines"))
        mark = mark.strip()
        if not mark:
            raise util.Abort(_("bookmark names cannot consist entirely of "
                               "whitespace"))
        if mark in marks and not force:
            raise util.Abort(_("a bookmark of the same name already exists"))
        # bookmarks share the lookup namespace with branches; avoid clashes
        if ((mark in repo.branchtags() or mark == repo.dirstate.branch())
            and not force):
            raise util.Abort(
                _("a bookmark cannot have the name of an existing branch"))
        if rev:
            marks[mark] = repo.lookup(rev)
        else:
            marks[mark] = repo.changectx('.').node()
        setcurrent(repo, mark)
        write(repo)
        return

    # no arguments: list all bookmarks
    if mark is None:
        if rev:
            raise util.Abort(_("bookmark name required"))
        if len(marks) == 0:
            ui.status(_("no bookmarks set\n"))
        else:
            for bmark, n in marks.iteritems():
                if ui.configbool('bookmarks', 'track.current'):
                    current = repo._bookmarkcurrent
                    # '*' only for the tracked bookmark, and only while it
                    # points at the working directory parent
                    if bmark == current and n == cur:
                        prefix, label = '*', 'bookmarks.current'
                    else:
                        prefix, label = ' ', ''
                else:
                    if n == cur:
                        prefix, label = '*', 'bookmarks.current'
                    else:
                        prefix, label = ' ', ''

                if ui.quiet:
                    ui.write("%s\n" % bmark, label=label)
                else:
                    ui.write(" %s %-25s %d:%s\n" % (
                        prefix, bmark, repo.changelog.rev(n), hexfn(n)),
                        label=label)
        return
187 193
188 194 def _revstostrip(changelog, node):
189 195 srev = changelog.rev(node)
190 196 tostrip = [srev]
191 197 saveheads = []
192 198 for r in xrange(srev, len(changelog)):
193 199 parents = changelog.parentrevs(r)
194 200 if parents[0] in tostrip or parents[1] in tostrip:
195 201 tostrip.append(r)
196 202 if parents[1] != nullrev:
197 203 for p in parents:
198 204 if p not in tostrip and p > srev:
199 205 saveheads.append(p)
200 206 return [r for r in tostrip if r not in saveheads]
201 207
def strip(oldstrip, ui, repo, node, backup="all"):
    """Strip bookmarks if revisions are stripped using
    the mercurial.strip method. This usually happens during
    qpush and qpop"""
    revisions = _revstostrip(repo.changelog, node)
    marks = repo._bookmarks
    update = []
    # remember which bookmarks point into the range about to be stripped
    for mark, n in marks.iteritems():
        if repo.changelog.rev(n) in revisions:
            update.append(mark)
    oldstrip(ui, repo, node, backup)
    if len(update) > 0:
        # re-point the orphaned bookmarks at the new working dir parent
        for m in update:
            marks[m] = repo.changectx('.').node()
        write(repo)
217 223
def reposetup(ui, repo):
    # Extend local repositories in place: repo.__class__ is swapped for a
    # subclass that loads bookmarks lazily and keeps them up to date
    # around commit, pull, push, changegroup, rollback and invalidate.
    if not repo.local():
        return

    class bookmark_repo(repo.__class__):

        @util.propertycache
        def _bookmarks(self):
            '''Parse .hg/bookmarks file and return a dictionary

            Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
            in the .hg/bookmarks file.
            Read the file and return a (name=>nodeid) dictionary
            '''
            try:
                bookmarks = {}
                for line in self.opener('bookmarks'):
                    sha, refspec = line.strip().split(' ', 1)
                    refspec = encoding.tolocal(refspec)
                    bookmarks[refspec] = self.changelog.lookup(sha)
            except:
                # NOTE(review): the bare except intentionally treats a
                # missing/unparsable bookmarks file as "no bookmarks",
                # but it also swallows unrelated errors — consider
                # narrowing to (IOError, error-type exceptions)
                pass
            return bookmarks

        @util.propertycache
        def _bookmarkcurrent(self):
            '''Get the current bookmark

            If we use gittishsh branches we have a current bookmark that
            we are on. This function returns the name of the bookmark. It
            is stored in .hg/bookmarks.current
            '''
            mark = None
            if os.path.exists(self.join('bookmarks.current')):
                file = self.opener('bookmarks.current')
                # No readline() in posixfile_nt, reading everything is cheap
                mark = (file.readlines() or [''])[0]
                if mark == '':
                    mark = None
                file.close()
            return mark

        def rollback(self, *args):
            # restore the bookmark state saved by write() before the
            # underlying transaction rollback
            if os.path.exists(self.join('undo.bookmarks')):
                util.rename(self.join('undo.bookmarks'), self.join('bookmarks'))
            return super(bookmark_repo, self).rollback(*args)

        def lookup(self, key):
            # let bookmark names resolve anywhere a rev/hash is accepted
            if key in self._bookmarks:
                key = self._bookmarks[key]
            return super(bookmark_repo, self).lookup(key)

        def _bookmarksupdate(self, parents, node):
            # advance bookmarks pointing at one of 'parents' to 'node'
            marks = self._bookmarks
            update = False
            if ui.configbool('bookmarks', 'track.current'):
                # git-like mode: only the active bookmark follows commits
                mark = self._bookmarkcurrent
                if mark and marks[mark] in parents:
                    marks[mark] = node
                    update = True
            else:
                for mark, n in marks.items():
                    if n in parents:
                        marks[mark] = node
                        update = True
            if update:
                write(self)

        def commitctx(self, ctx, error=False):
            """Add a revision to the repository and
            move the bookmark"""
            wlock = self.wlock() # do both commit and bookmark with lock held
            try:
                node = super(bookmark_repo, self).commitctx(ctx, error)
                if node is None:
                    return None
                parents = self.changelog.parents(node)
                # non-merge: only the first parent is meaningful
                if parents[1] == nullid:
                    parents = (parents[0],)

                self._bookmarksupdate(parents, node)
                return node
            finally:
                wlock.release()

        def pull(self, remote, heads=None, force=False):
            result = super(bookmark_repo, self).pull(remote, heads, force)

            self.ui.debug("checking for updated bookmarks\n")
            rb = remote.listkeys('bookmarks')
            changed = False
            for k in rb.keys():
                if k in self._bookmarks:
                    nr, nl = rb[k], self._bookmarks[k]
                    if nr in self:
                        cr = self[nr]
                        cl = self[nl]
                        # only ever fast-forward the local bookmark
                        if cl.rev() >= cr.rev():
                            continue
                        if cr in cl.descendants():
                            self._bookmarks[k] = cr.node()
                            changed = True
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_("not updating divergent"
                                           " bookmark %s\n") % k)
            if changed:
                # 'repo' (the reposetup argument) is the same instance as
                # 'self' after the class swap below, so this is write(self)
                write(repo)

            return result

        def push(self, remote, force=False, revs=None, newbranch=False):
            result = super(bookmark_repo, self).push(remote, force, revs,
                                                     newbranch)

            self.ui.debug("checking for updated bookmarks\n")
            rb = remote.listkeys('bookmarks')
            for k in rb.keys():
                if k in self._bookmarks:
                    nr, nl = rb[k], hex(self._bookmarks[k])
                    if nr in self:
                        cr = self[nr]
                        cl = self[nl]
                        # only fast-forward the remote bookmark via pushkey
                        if cl in cr.descendants():
                            r = remote.pushkey('bookmarks', k, nr, nl)
                            if r:
                                self.ui.status(_("updating bookmark %s\n") % k)
                            else:
                                self.ui.warn(_('updating bookmark %s'
                                               ' failed!\n') % k)

            return result

        def addchangegroup(self, *args, **kwargs):
            result = super(bookmark_repo, self).addchangegroup(*args, **kwargs)
            if result > 1:
                # We have more heads than before
                return result
            node = self.changelog.tip()
            parents = self.dirstate.parents()
            self._bookmarksupdate(parents, node)
            return result

        def _findtags(self):
            """Merge bookmarks with normal tags"""
            (tags, tagtypes) = super(bookmark_repo, self)._findtags()
            tags.update(self._bookmarks)
            return (tags, tagtypes)

        # older repo classes may lack invalidate(); only override when present
        if hasattr(repo, 'invalidate'):
            def invalidate(self):
                super(bookmark_repo, self).invalidate()
                # drop cached bookmark state so it is re-read from disk
                for attr in ('_bookmarks', '_bookmarkcurrent'):
                    if attr in self.__dict__:
                        delattr(self, attr)

    repo.__class__ = bookmark_repo
375 381
def listbookmarks(repo):
    '''Return the repository's bookmarks as a {name: hex-node} dict.'''
    # We may try to list bookmarks on a repo type that does not
    # support it (e.g., statichttprepository).
    if not hasattr(repo, '_bookmarks'):
        return {}
    return dict((name, hex(node))
                for name, node in repo._bookmarks.iteritems())
386 392
def pushbookmark(repo, key, old, new):
    '''pushkey handler: move bookmark 'key' from hex node 'old' to 'new'.

    Returns True on success; False when the expected old value does not
    match the current one or when 'new' is not a known revision.  An
    empty 'new' deletes the bookmark.
    '''
    w = repo.wlock()
    try:
        marks = repo._bookmarks
        # compare-and-swap: refuse if the bookmark moved since the caller
        # read 'old'
        if hex(marks.get(key, '')) != old:
            return False
        if new == '':
            del marks[key]
        else:
            if new not in repo:
                return False
            marks[key] = repo[new].node()
        write(repo)
        return True
    finally:
        w.release()
403 409
def pull(oldpull, ui, repo, source="default", **opts):
    '''Wrap 'hg pull' to support -B/--bookmark BOOKMARK arguments.'''
    # translate bookmark args to rev args for actual pull
    if opts.get('bookmark'):
        # this is an unpleasant hack as pull will do this internally
        source, branches = hg.parseurl(ui.expandpath(source),
                                       opts.get('branch'))
        other = hg.repository(hg.remoteui(repo, opts), source)
        rb = other.listkeys('bookmarks')

        for b in opts['bookmark']:
            if b not in rb:
                raise util.Abort(_('remote bookmark %s not found!') % b)
            opts.setdefault('rev', []).append(b)

    result = oldpull(ui, repo, source, **opts)

    # update specified bookmarks
    if opts.get('bookmark'):
        for b in opts['bookmark']:
            # explicit pull overrides local bookmark if any
            ui.status(_("importing bookmark %s\n") % b)
            repo._bookmarks[b] = repo[rb[b]].node()
        write(repo)

    return result
429 435
def push(oldpush, ui, repo, dest=None, **opts):
    '''Wrap 'hg push' to support -B/--bookmark BOOKMARK arguments.

    Returns the wrapped push's result, or 2 when a named bookmark is
    unknown or a bookmark pushkey update fails.
    '''
    dopush = True
    if opts.get('bookmark'):
        # only push changesets when at least one named bookmark is local;
        # a pure remote-bookmark deletion needs no changeset push
        dopush = False
        for b in opts['bookmark']:
            if b in repo._bookmarks:
                dopush = True
                opts.setdefault('rev', []).append(b)

    result = 0
    if dopush:
        result = oldpush(ui, repo, dest, **opts)

    if opts.get('bookmark'):
        # this is an unpleasant hack as push will do this internally
        dest = ui.expandpath(dest or 'default-push', dest or 'default')
        dest, branches = hg.parseurl(dest, opts.get('branch'))
        other = hg.repository(hg.remoteui(repo, opts), dest)
        rb = other.listkeys('bookmarks')
        for b in opts['bookmark']:
            # explicit push overrides remote bookmark if any
            if b in repo._bookmarks:
                ui.status(_("exporting bookmark %s\n") % b)
                new = repo[b].hex()
            elif b in rb:
                ui.status(_("deleting remote bookmark %s\n") % b)
                new = '' # delete
            else:
                ui.warn(_('bookmark %s does not exist on the local '
                          'or remote repository!\n') % b)
                return 2
            old = rb.get(b, '')
            r = other.pushkey('bookmarks', b, old, new)
            if not r:
                ui.warn(_('updating bookmark %s failed!\n') % b)
                if not result:
                    result = 2

    return result
469 475
def diffbookmarks(ui, repo, remote):
    '''Print bookmarks that exist in 'remote' but not in 'repo'.

    Returns 0 when at least one such bookmark was found, 1 otherwise
    (command-style exit codes).
    '''
    ui.status(_("searching for changed bookmarks\n"))

    localmarks = repo.listkeys('bookmarks')
    remotemarks = remote.listkeys('bookmarks')

    newmarks = sorted(set(remotemarks) - set(localmarks))
    for name in newmarks:
        ui.write(" %-25s %s\n" % (name, remotemarks[name][:12]))

    if not newmarks:
        ui.status(_("no changed bookmarks found\n"))
        return 1
    return 0
484 490
def incoming(oldincoming, ui, repo, source="default", **opts):
    '''Wrap 'hg incoming': with -B/--bookmarks, compare bookmarks instead.'''
    if not opts.get('bookmarks'):
        return oldincoming(ui, repo, source, **opts)
    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
    other = hg.repository(hg.remoteui(repo, opts), source)
    ui.status(_('comparing with %s\n') % url.hidepassword(source))
    return diffbookmarks(ui, repo, other)
493 499
def outgoing(oldoutgoing, ui, repo, dest=None, **opts):
    '''Wrap 'hg outgoing': with -B/--bookmarks, compare bookmarks instead.'''
    if not opts.get('bookmarks'):
        return oldoutgoing(ui, repo, dest, **opts)
    path = ui.expandpath(dest or 'default-push', dest or 'default')
    path, branches = hg.parseurl(path, opts.get('branch'))
    other = hg.repository(hg.remoteui(repo, opts), path)
    ui.status(_('comparing with %s\n') % url.hidepassword(path))
    # note the swapped arguments: we list what WE have that remote lacks
    return diffbookmarks(ui, other, repo)
503 509
def uisetup(ui):
    '''Hook repair.strip and add bookmark options to the core commands.'''
    extensions.wrapfunction(repair, "strip", strip)
    # in track.current mode, 'hg update' must record the active bookmark
    if ui.configbool('bookmarks', 'track.current'):
        extensions.wrapcommand(commands.table, 'update', updatecurbookmark)

    entry = extensions.wrapcommand(commands.table, 'pull', pull)
    entry[1].append(('B', 'bookmark', [],
                     _("bookmark to import"),
                     _('BOOKMARK')))
    entry = extensions.wrapcommand(commands.table, 'push', push)
    entry[1].append(('B', 'bookmark', [],
                     _("bookmark to export"),
                     _('BOOKMARK')))
    entry = extensions.wrapcommand(commands.table, 'incoming', incoming)
    entry[1].append(('B', 'bookmarks', False,
                     _("compare bookmark")))
    entry = extensions.wrapcommand(commands.table, 'outgoing', outgoing)
    entry[1].append(('B', 'bookmarks', False,
                     _("compare bookmark")))

    # expose bookmarks over the wire protocol
    pushkey.register('bookmarks', pushbookmark, listbookmarks)
525 531
def updatecurbookmark(orig, ui, repo, *args, **opts):
    '''Set the current bookmark

    If the user updates to a bookmark we update the .hg/bookmarks.current
    file.
    '''
    res = orig(ui, repo, *args, **opts)
    rev = opts['rev']
    # 'hg update NAME' passes the target positionally rather than via --rev
    if not rev and len(args) > 0:
        rev = args[0]
    # setcurrent clears the marker when rev is not a bookmark name
    setcurrent(repo, rev)
    return res
538 544
def bmrevset(repo, subset, x):
    """``bookmark([name])``
    The named bookmark or all bookmarks.
    """
    # i18n: "bookmark" is a keyword
    args = revset.getargs(x, 0, 1, _('bookmark takes one or no arguments'))
    if args:
        bm = revset.getstring(args[0],
                              # i18n: "bookmark" is a keyword
                              _('the argument to bookmark must be a string'))
        bmrev = listbookmarks(repo).get(bm, None)
        if bmrev:
            bmrev = repo.changelog.rev(bin(bmrev))
        # preserve subset order; at most one revision can match
        return [r for r in subset if r == bmrev]
    # no argument: every revision carrying any bookmark
    bms = set([repo.changelog.rev(bin(r)) for r in listbookmarks(repo).values()])
    return [r for r in subset if r in bms]
555 561
def extsetup(ui):
    # register bookmark([name]) as a revset predicate
    revset.symbols['bookmark'] = bmrevset
558 564
# command table: registers 'hg bookmarks' and its flags with Mercurial
cmdtable = {
    "bookmarks":
        (bookmark,
         [('f', 'force', False, _('force')),
          ('r', 'rev', '', _('revision'), _('REV')),
          ('d', 'delete', False, _('delete a given bookmark')),
          ('m', 'rename', '', _('rename a given bookmark'), _('NAME'))],
         _('hg bookmarks [-f] [-d] [-m NAME] [-r REV] [NAME]')),
}

# color-extension label for the active bookmark in listings
colortable = {'bookmarks.current': 'green'}

# tell hggettext to extract docstrings from these functions:
i18nfunctions = [bmrevset]
@@ -1,272 +1,274 b''
1 1 # archival.py - revision archival for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex
10 10 import cmdutil
11 11 import util, encoding
12 12 import cStringIO, os, stat, tarfile, time, zipfile
13 13 import zlib, gzip
14 14
def tidyprefix(dest, kind, prefix):
    '''choose prefix to use for names in archive. make sure prefix is
    safe for consumers.'''

    if prefix:
        prefix = util.normpath(prefix)
    else:
        if not isinstance(dest, str):
            raise ValueError('dest must be string if no prefix')
        # default prefix: the archive file name with its extension stripped
        prefix = os.path.basename(dest)
        lower = prefix.lower()
        for sfx in exts.get(kind, []):
            if lower.endswith(sfx):
                prefix = prefix[:-len(sfx)]
                break
    lpfx = os.path.normpath(util.localpath(prefix))
    prefix = util.pconvert(lpfx)
    if not prefix.endswith('/'):
        prefix += '/'
    # refuse prefixes that could let members escape the extraction dir
    if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
        raise util.Abort(_('archive prefix contains illegal components'))
    return prefix
37 37
# archive kind -> recognized file name extensions
# (used by guesskind() and by tidyprefix() to strip the suffix)
exts = {
    'tar': ['.tar'],
    'tbz2': ['.tbz2', '.tar.bz2'],
    'tgz': ['.tgz', '.tar.gz'],
    'zip': ['.zip'],
    }
44 44
def guesskind(dest):
    '''Infer the archive kind from dest's file extension, or None.'''
    for kind, extensions in exts.iteritems():
        for ext in extensions:
            if dest.endswith(ext):
                return kind
    return None
50 50
51 51
class tarit(object):
    '''write archive to tar file or stream. can write uncompressed,
    or compress with gzip or bzip2.'''

    class GzipFileWithTime(gzip.GzipFile):
        # GzipFile variant that lets the caller pin the mtime stored in
        # the gzip header, making archive output reproducible

        def __init__(self, *args, **kw):
            timestamp = None
            if 'timestamp' in kw:
                timestamp = kw.pop('timestamp')
            if timestamp is None:
                self.timestamp = time.time()
            else:
                self.timestamp = timestamp
            gzip.GzipFile.__init__(self, *args, **kw)

        def _write_gzip_header(self):
            # re-implements GzipFile's header writer so self.timestamp is
            # used instead of the current time
            self.fileobj.write('\037\213')             # magic header
            self.fileobj.write('\010')                 # compression method
            # Python 2.6 deprecates self.filename
            fname = getattr(self, 'name', None) or self.filename
            # store the member name without the trailing '.gz'
            if fname and fname.endswith('.gz'):
                fname = fname[:-3]
            flags = 0
            if fname:
                flags = gzip.FNAME
            self.fileobj.write(chr(flags))
            gzip.write32u(self.fileobj, long(self.timestamp))
            self.fileobj.write('\002')
            self.fileobj.write('\377')
            if fname:
                self.fileobj.write(fname + '\000')

    def __init__(self, dest, mtime, kind=''):
        self.mtime = mtime

        def taropen(name, mode, fileobj=None):
            if kind == 'gz':
                # wrap the destination in our timestamp-pinned gzip stream
                mode = mode[0]
                if not fileobj:
                    fileobj = open(name, mode + 'b')
                gzfileobj = self.GzipFileWithTime(name, mode + 'b',
                                                 zlib.Z_BEST_COMPRESSION,
                                                 fileobj, timestamp=mtime)
                return tarfile.TarFile.taropen(name, mode, gzfileobj)
            else:
                return tarfile.open(name, mode + kind, fileobj)

        if isinstance(dest, str):
            self.z = taropen(dest, mode='w:')
        else:
            # Python 2.5-2.5.1 have a regression that requires a name arg
            self.z = taropen(name='', mode='w|', fileobj=dest)

    def addfile(self, name, mode, islink, data):
        i = tarfile.TarInfo(name)
        i.mtime = self.mtime
        i.size = len(data)
        if islink:
            # symlinks store their target in the header, not as content
            i.type = tarfile.SYMTYPE
            i.mode = 0777
            i.linkname = data
            data = None
            i.size = 0
        else:
            i.mode = mode
            data = cStringIO.StringIO(data)
        self.z.addfile(i, data)

    def done(self):
        self.z.close()
121 123
class tellable(object):
    '''provide tell method for zipfile.ZipFile when writing to http
    response file object.'''

    def __init__(self, fp):
        self.fp = fp
        # running count of bytes written; stands in for a real file offset
        self.offset = 0

    def __getattr__(self, key):
        # delegate every other attribute to the wrapped file object
        return getattr(self.fp, key)

    def write(self, s):
        self.fp.write(s)
        self.offset += len(s)

    def tell(self):
        return self.offset
139 141
class zipit(object):
    '''write archive to zip file or stream. can write uncompressed,
    or compressed with deflate.'''

    def __init__(self, dest, mtime, compress=True):
        if not isinstance(dest, str):
            try:
                dest.tell()
            except (AttributeError, IOError):
                # stream without tell() (e.g. http response): wrap it so
                # zipfile can track the write position
                dest = tellable(dest)
        self.z = zipfile.ZipFile(dest, 'w',
                                 compress and zipfile.ZIP_DEFLATED or
                                 zipfile.ZIP_STORED)

        # Python's zipfile module emits deprecation warnings if we try
        # to store files with a date before 1980.
        epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0))
        if mtime < epoch:
            mtime = epoch

        self.date_time = time.gmtime(mtime)[:6]

    def addfile(self, name, mode, islink, data):
        i = zipfile.ZipInfo(name, self.date_time)
        i.compress_type = self.z.compression
        # unzip will not honor unix file modes unless file creator is
        # set to unix (id 3).
        i.create_system = 3
        ftype = stat.S_IFREG
        if islink:
            mode = 0777
            ftype = stat.S_IFLNK
        # the high 16 bits of external_attr carry the unix mode + file type
        i.external_attr = (mode | ftype) << 16L
        self.z.writestr(i, data)

    def done(self):
        self.z.close()
177 179
class fileit(object):
    '''write archive as files in directory.'''

    def __init__(self, name, mtime):
        # mtime is accepted for interface parity with the other archivers
        # but is not applied to plain files here
        self.basedir = name
        self.opener = util.opener(self.basedir)

    def addfile(self, name, mode, islink, data):
        if islink:
            self.opener.symlink(data, name)
            return
        # write atomically, then apply the requested mode
        f = self.opener(name, "w", atomictemp=True)
        f.write(data)
        f.rename()
        destfile = os.path.join(self.basedir, name)
        os.chmod(destfile, mode)

    def done(self):
        pass
197 199
# archive kind -> archiver factory taking (dest, mtime)
archivers = {
    'files': fileit,
    'tar': tarit,
    'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'),
    'tgz': lambda name, mtime: tarit(name, mtime, 'gz'),
    'uzip': lambda name, mtime: zipit(name, mtime, False),  # uncompressed zip
    'zip': zipit,
    }
206 208
def archive(repo, dest, node, kind, decode=True, matchfn=None,
            prefix=None, mtime=None, subrepos=False):
    '''create archive of repo as it was at node.

    dest can be name of directory, name of archive file, or file
    object to write archive to.

    kind is type of archive to create.

    decode tells whether to put files through decode filters from
    hgrc.

    matchfn is function to filter names of files to write to archive.

    prefix is name of path to put before every archive member.

    mtime is the modification time recorded for archive members;
    defaults to the changeset's date.

    subrepos tells whether to recurse into subrepositories.'''

    if kind == 'files':
        if prefix:
            raise util.Abort(_('cannot give prefix when archiving to files'))
    else:
        prefix = tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # emit one archive member, honoring matchfn and decode filters
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    if kind not in archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]
    archiver = archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        def metadata():
            # .hg_archival.txt content: repo id, archived node, branch,
            # and either the node's global tags or latest-tag information
            base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                repo[0].hex(), hex(node), encoding.fromlocal(ctx.branch()))

            tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                           if repo.tagtype(t) == 'global')
            if not tags:
                # no global tag on this node: record the nearest tag and
                # the distance to it via the template engine
                repo.ui.pushbuffer()
                opts = {'template': '{latesttag}\n{latesttagdistance}',
                        'style': '', 'patch': None, 'git': None}
                cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                ltags, dist = repo.ui.popbuffer().split('\n')
                tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
                tags += 'latesttagdistance: %s\n' % dist

            return base + tags

        write('.hg_archival.txt', 0644, False, metadata)

    for f in ctx:
        ff = ctx.flags(f)
        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, ctx[f].data)

    if subrepos:
        for subpath in ctx.substate:
            sub = ctx.sub(subpath)
            sub.archive(archiver, prefix)

    archiver.done()
@@ -1,1612 +1,1614 b''
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 import cStringIO, email.Parser, os, re
10 10 import tempfile, zlib
11 11
12 12 from i18n import _
13 13 from node import hex, nullid, short
14 14 import base85, mdiff, util, diffhelpers, copies, encoding
15 15
16 16 gitre = re.compile('diff --git a/(.*) b/(.*)')
17 17
18 18 class PatchError(Exception):
19 19 pass
20 20
21 21 # helper functions
22 22
23 23 def copyfile(src, dst, basedir):
24 24 abssrc, absdst = [util.canonpath(basedir, basedir, x) for x in [src, dst]]
25 25 if os.path.lexists(absdst):
26 26 raise util.Abort(_("cannot create %s: destination already exists") %
27 27 dst)
28 28
29 29 dstdir = os.path.dirname(absdst)
30 30 if dstdir and not os.path.isdir(dstdir):
31 31 try:
32 32 os.makedirs(dstdir)
33 33 except IOError:
34 34 raise util.Abort(
35 35 _("cannot create %s: unable to create destination directory")
36 36 % dst)
37 37
38 38 util.copyfile(abssrc, absdst)
39 39
40 40 # public functions
41 41
42 42 def split(stream):
43 43 '''return an iterator of individual patches from a stream'''
44 44 def isheader(line, inheader):
45 45 if inheader and line[0] in (' ', '\t'):
46 46 # continuation
47 47 return True
48 48 if line[0] in (' ', '-', '+'):
49 49 # diff line - don't check for header pattern in there
50 50 return False
51 51 l = line.split(': ', 1)
52 52 return len(l) == 2 and ' ' not in l[0]
53 53
54 54 def chunk(lines):
55 55 return cStringIO.StringIO(''.join(lines))
56 56
57 57 def hgsplit(stream, cur):
58 58 inheader = True
59 59
60 60 for line in stream:
61 61 if not line.strip():
62 62 inheader = False
63 63 if not inheader and line.startswith('# HG changeset patch'):
64 64 yield chunk(cur)
65 65 cur = []
66 66 inheader = True
67 67
68 68 cur.append(line)
69 69
70 70 if cur:
71 71 yield chunk(cur)
72 72
73 73 def mboxsplit(stream, cur):
74 74 for line in stream:
75 75 if line.startswith('From '):
76 76 for c in split(chunk(cur[1:])):
77 77 yield c
78 78 cur = []
79 79
80 80 cur.append(line)
81 81
82 82 if cur:
83 83 for c in split(chunk(cur[1:])):
84 84 yield c
85 85
86 86 def mimesplit(stream, cur):
87 87 def msgfp(m):
88 88 fp = cStringIO.StringIO()
89 89 g = email.Generator.Generator(fp, mangle_from_=False)
90 90 g.flatten(m)
91 91 fp.seek(0)
92 92 return fp
93 93
94 94 for line in stream:
95 95 cur.append(line)
96 96 c = chunk(cur)
97 97
98 98 m = email.Parser.Parser().parse(c)
99 99 if not m.is_multipart():
100 100 yield msgfp(m)
101 101 else:
102 102 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
103 103 for part in m.walk():
104 104 ct = part.get_content_type()
105 105 if ct not in ok_types:
106 106 continue
107 107 yield msgfp(part)
108 108
109 109 def headersplit(stream, cur):
110 110 inheader = False
111 111
112 112 for line in stream:
113 113 if not inheader and isheader(line, inheader):
114 114 yield chunk(cur)
115 115 cur = []
116 116 inheader = True
117 117 if inheader and not isheader(line, inheader):
118 118 inheader = False
119 119
120 120 cur.append(line)
121 121
122 122 if cur:
123 123 yield chunk(cur)
124 124
125 125 def remainder(cur):
126 126 yield chunk(cur)
127 127
128 128 class fiter(object):
129 129 def __init__(self, fp):
130 130 self.fp = fp
131 131
132 132 def __iter__(self):
133 133 return self
134 134
135 135 def next(self):
136 136 l = self.fp.readline()
137 137 if not l:
138 138 raise StopIteration
139 139 return l
140 140
141 141 inheader = False
142 142 cur = []
143 143
144 144 mimeheaders = ['content-type']
145 145
146 146 if not hasattr(stream, 'next'):
147 147 # http responses, for example, have readline but not next
148 148 stream = fiter(stream)
149 149
150 150 for line in stream:
151 151 cur.append(line)
152 152 if line.startswith('# HG changeset patch'):
153 153 return hgsplit(stream, cur)
154 154 elif line.startswith('From '):
155 155 return mboxsplit(stream, cur)
156 156 elif isheader(line, inheader):
157 157 inheader = True
158 158 if line.split(':', 1)[0].lower() in mimeheaders:
159 159 # let email parser handle this
160 160 return mimesplit(stream, cur)
161 161 elif line.startswith('--- ') and inheader:
162 162 # No evil headers seen by diff start, split by hand
163 163 return headersplit(stream, cur)
164 164 # Not enough info, keep reading
165 165
166 166 # if we are here, we have a very plain patch
167 167 return remainder(cur)
168 168
169 169 def extract(ui, fileobj):
170 170 '''extract patch from data read from fileobj.
171 171
172 172 patch can be a normal patch or contained in an email message.
173 173
174 174 return tuple (filename, message, user, date, branch, node, p1, p2).
175 175 Any item in the returned tuple can be None. If filename is None,
176 176 fileobj did not contain a patch. Caller must unlink filename when done.'''
177 177
178 178 # attempt to detect the start of a patch
179 179 # (this heuristic is borrowed from quilt)
180 180 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
181 181 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
182 182 r'---[ \t].*?^\+\+\+[ \t]|'
183 183 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
184 184
185 185 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
186 186 tmpfp = os.fdopen(fd, 'w')
187 187 try:
188 188 msg = email.Parser.Parser().parse(fileobj)
189 189
190 190 subject = msg['Subject']
191 191 user = msg['From']
192 192 if not subject and not user:
193 193 # Not an email, restore parsed headers if any
194 194 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
195 195
196 196 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
197 197 # should try to parse msg['Date']
198 198 date = None
199 199 nodeid = None
200 200 branch = None
201 201 parents = []
202 202
203 203 if subject:
204 204 if subject.startswith('[PATCH'):
205 205 pend = subject.find(']')
206 206 if pend >= 0:
207 207 subject = subject[pend + 1:].lstrip()
208 208 subject = subject.replace('\n\t', ' ')
209 209 ui.debug('Subject: %s\n' % subject)
210 210 if user:
211 211 ui.debug('From: %s\n' % user)
212 212 diffs_seen = 0
213 213 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
214 214 message = ''
215 215 for part in msg.walk():
216 216 content_type = part.get_content_type()
217 217 ui.debug('Content-Type: %s\n' % content_type)
218 218 if content_type not in ok_types:
219 219 continue
220 220 payload = part.get_payload(decode=True)
221 221 m = diffre.search(payload)
222 222 if m:
223 223 hgpatch = False
224 224 hgpatchheader = False
225 225 ignoretext = False
226 226
227 227 ui.debug('found patch at byte %d\n' % m.start(0))
228 228 diffs_seen += 1
229 229 cfp = cStringIO.StringIO()
230 230 for line in payload[:m.start(0)].splitlines():
231 231 if line.startswith('# HG changeset patch') and not hgpatch:
232 232 ui.debug('patch generated by hg export\n')
233 233 hgpatch = True
234 234 hgpatchheader = True
235 235 # drop earlier commit message content
236 236 cfp.seek(0)
237 237 cfp.truncate()
238 238 subject = None
239 239 elif hgpatchheader:
240 240 if line.startswith('# User '):
241 241 user = line[7:]
242 242 ui.debug('From: %s\n' % user)
243 243 elif line.startswith("# Date "):
244 244 date = line[7:]
245 245 elif line.startswith("# Branch "):
246 246 branch = line[9:]
247 247 elif line.startswith("# Node ID "):
248 248 nodeid = line[10:]
249 249 elif line.startswith("# Parent "):
250 250 parents.append(line[10:])
251 251 elif not line.startswith("# "):
252 252 hgpatchheader = False
253 253 elif line == '---' and gitsendmail:
254 254 ignoretext = True
255 255 if not hgpatchheader and not ignoretext:
256 256 cfp.write(line)
257 257 cfp.write('\n')
258 258 message = cfp.getvalue()
259 259 if tmpfp:
260 260 tmpfp.write(payload)
261 261 if not payload.endswith('\n'):
262 262 tmpfp.write('\n')
263 263 elif not diffs_seen and message and content_type == 'text/plain':
264 264 message += '\n' + payload
265 265 except:
266 266 tmpfp.close()
267 267 os.unlink(tmpname)
268 268 raise
269 269
270 270 if subject and not message.startswith(subject):
271 271 message = '%s\n%s' % (subject, message)
272 272 tmpfp.close()
273 273 if not diffs_seen:
274 274 os.unlink(tmpname)
275 275 return None, message, user, date, branch, None, None, None
276 276 p1 = parents and parents.pop(0) or None
277 277 p2 = parents and parents.pop(0) or None
278 278 return tmpname, message, user, date, branch, nodeid, p1, p2
279 279
280 280 class patchmeta(object):
281 281 """Patched file metadata
282 282
283 283 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
284 284 or COPY. 'path' is patched file path. 'oldpath' is set to the
285 285 origin file when 'op' is either COPY or RENAME, None otherwise. If
286 286 file mode is changed, 'mode' is a tuple (islink, isexec) where
287 287 'islink' is True if the file is a symlink and 'isexec' is True if
288 288 the file is executable. Otherwise, 'mode' is None.
289 289 """
290 290 def __init__(self, path):
291 291 self.path = path
292 292 self.oldpath = None
293 293 self.mode = None
294 294 self.op = 'MODIFY'
295 295 self.binary = False
296 296
297 297 def setmode(self, mode):
298 298 islink = mode & 020000
299 299 isexec = mode & 0100
300 300 self.mode = (islink, isexec)
301 301
302 302 def __repr__(self):
303 303 return "<patchmeta %s %r>" % (self.op, self.path)
304 304
305 305 def readgitpatch(lr):
306 306 """extract git-style metadata about patches from <patchname>"""
307 307
308 308 # Filter patch for git information
309 309 gp = None
310 310 gitpatches = []
311 311 for line in lr:
312 312 line = line.rstrip(' \r\n')
313 313 if line.startswith('diff --git'):
314 314 m = gitre.match(line)
315 315 if m:
316 316 if gp:
317 317 gitpatches.append(gp)
318 318 dst = m.group(2)
319 319 gp = patchmeta(dst)
320 320 elif gp:
321 321 if line.startswith('--- '):
322 322 gitpatches.append(gp)
323 323 gp = None
324 324 continue
325 325 if line.startswith('rename from '):
326 326 gp.op = 'RENAME'
327 327 gp.oldpath = line[12:]
328 328 elif line.startswith('rename to '):
329 329 gp.path = line[10:]
330 330 elif line.startswith('copy from '):
331 331 gp.op = 'COPY'
332 332 gp.oldpath = line[10:]
333 333 elif line.startswith('copy to '):
334 334 gp.path = line[8:]
335 335 elif line.startswith('deleted file'):
336 336 gp.op = 'DELETE'
337 337 elif line.startswith('new file mode '):
338 338 gp.op = 'ADD'
339 339 gp.setmode(int(line[-6:], 8))
340 340 elif line.startswith('new mode '):
341 341 gp.setmode(int(line[-6:], 8))
342 342 elif line.startswith('GIT binary patch'):
343 343 gp.binary = True
344 344 if gp:
345 345 gitpatches.append(gp)
346 346
347 347 return gitpatches
348 348
349 349 class linereader(object):
350 350 # simple class to allow pushing lines back into the input stream
351 351 def __init__(self, fp, textmode=False):
352 352 self.fp = fp
353 353 self.buf = []
354 354 self.textmode = textmode
355 355 self.eol = None
356 356
357 357 def push(self, line):
358 358 if line is not None:
359 359 self.buf.append(line)
360 360
361 361 def readline(self):
362 362 if self.buf:
363 363 l = self.buf[0]
364 364 del self.buf[0]
365 365 return l
366 366 l = self.fp.readline()
367 367 if not self.eol:
368 368 if l.endswith('\r\n'):
369 369 self.eol = '\r\n'
370 370 elif l.endswith('\n'):
371 371 self.eol = '\n'
372 372 if self.textmode and l.endswith('\r\n'):
373 373 l = l[:-2] + '\n'
374 374 return l
375 375
376 376 def __iter__(self):
377 377 while 1:
378 378 l = self.readline()
379 379 if not l:
380 380 break
381 381 yield l
382 382
383 383 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
384 384 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
385 385 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
386 386 eolmodes = ['strict', 'crlf', 'lf', 'auto']
387 387
388 388 class patchfile(object):
389 389 def __init__(self, ui, fname, opener, missing=False, eolmode='strict'):
390 390 self.fname = fname
391 391 self.eolmode = eolmode
392 392 self.eol = None
393 393 self.opener = opener
394 394 self.ui = ui
395 395 self.lines = []
396 396 self.exists = False
397 397 self.missing = missing
398 398 if not missing:
399 399 try:
400 400 self.lines = self.readlines(fname)
401 401 self.exists = True
402 402 except IOError:
403 403 pass
404 404 else:
405 405 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
406 406
407 407 self.hash = {}
408 408 self.dirty = 0
409 409 self.offset = 0
410 410 self.skew = 0
411 411 self.rej = []
412 412 self.fileprinted = False
413 413 self.printfile(False)
414 414 self.hunks = 0
415 415
416 416 def readlines(self, fname):
417 417 if os.path.islink(fname):
418 418 return [os.readlink(fname)]
419 419 fp = self.opener(fname, 'r')
420 420 try:
421 421 lr = linereader(fp, self.eolmode != 'strict')
422 422 lines = list(lr)
423 423 self.eol = lr.eol
424 424 return lines
425 425 finally:
426 426 fp.close()
427 427
428 428 def writelines(self, fname, lines):
429 429 # Ensure supplied data ends in fname, being a regular file or
430 430 # a symlink. cmdutil.updatedir will -too magically- take care
431 431 # of setting it to the proper type afterwards.
432 432 islink = os.path.islink(fname)
433 433 if islink:
434 434 fp = cStringIO.StringIO()
435 435 else:
436 436 fp = self.opener(fname, 'w')
437 437 try:
438 438 if self.eolmode == 'auto':
439 439 eol = self.eol
440 440 elif self.eolmode == 'crlf':
441 441 eol = '\r\n'
442 442 else:
443 443 eol = '\n'
444 444
445 445 if self.eolmode != 'strict' and eol and eol != '\n':
446 446 for l in lines:
447 447 if l and l[-1] == '\n':
448 448 l = l[:-1] + eol
449 449 fp.write(l)
450 450 else:
451 451 fp.writelines(lines)
452 452 if islink:
453 453 self.opener.symlink(fp.getvalue(), fname)
454 454 finally:
455 455 fp.close()
456 456
457 457 def unlink(self, fname):
458 458 os.unlink(fname)
459 459
460 460 def printfile(self, warn):
461 461 if self.fileprinted:
462 462 return
463 463 if warn or self.ui.verbose:
464 464 self.fileprinted = True
465 465 s = _("patching file %s\n") % self.fname
466 466 if warn:
467 467 self.ui.warn(s)
468 468 else:
469 469 self.ui.note(s)
470 470
471 471
472 472 def findlines(self, l, linenum):
473 473 # looks through the hash and finds candidate lines. The
474 474 # result is a list of line numbers sorted based on distance
475 475 # from linenum
476 476
477 477 cand = self.hash.get(l, [])
478 478 if len(cand) > 1:
479 479 # resort our list of potentials forward then back.
480 480 cand.sort(key=lambda x: abs(x - linenum))
481 481 return cand
482 482
483 483 def hashlines(self):
484 484 self.hash = {}
485 485 for x, s in enumerate(self.lines):
486 486 self.hash.setdefault(s, []).append(x)
487 487
488 def makerejlines(self, fname):
489 base = os.path.basename(fname)
490 yield "--- %s\n+++ %s\n" % (base, base)
491 for x in self.rej:
492 for l in x.hunk:
493 yield l
494 if l[-1] != '\n':
495 yield "\n\ No newline at end of file\n"
496
488 497 def write_rej(self):
489 498 # our rejects are a little different from patch(1). This always
490 499 # creates rejects in the same form as the original patch. A file
491 500 # header is inserted so that you can run the reject through patch again
492 501 # without having to type the filename.
493 502
494 503 if not self.rej:
495 504 return
496 505
497 506 fname = self.fname + ".rej"
498 507 self.ui.warn(
499 508 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
500 509 (len(self.rej), self.hunks, fname))
501 510
502 def rejlines():
503 base = os.path.basename(self.fname)
504 yield "--- %s\n+++ %s\n" % (base, base)
505 for x in self.rej:
506 for l in x.hunk:
507 yield l
508 if l[-1] != '\n':
509 yield "\n\ No newline at end of file\n"
510
511 self.writelines(fname, rejlines())
511 fp = self.opener(fname, 'w')
512 fp.writelines(self.makerejlines(self.fname))
513 fp.close()
512 514
513 515 def apply(self, h):
514 516 if not h.complete():
515 517 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
516 518 (h.number, h.desc, len(h.a), h.lena, len(h.b),
517 519 h.lenb))
518 520
519 521 self.hunks += 1
520 522
521 523 if self.missing:
522 524 self.rej.append(h)
523 525 return -1
524 526
525 527 if self.exists and h.createfile():
526 528 self.ui.warn(_("file %s already exists\n") % self.fname)
527 529 self.rej.append(h)
528 530 return -1
529 531
530 532 if isinstance(h, binhunk):
531 533 if h.rmfile():
532 534 self.unlink(self.fname)
533 535 else:
534 536 self.lines[:] = h.new()
535 537 self.offset += len(h.new())
536 538 self.dirty = 1
537 539 return 0
538 540
539 541 horig = h
540 542 if (self.eolmode in ('crlf', 'lf')
541 543 or self.eolmode == 'auto' and self.eol):
542 544 # If new eols are going to be normalized, then normalize
543 545 # hunk data before patching. Otherwise, preserve input
544 546 # line-endings.
545 547 h = h.getnormalized()
546 548
547 549 # fast case first, no offsets, no fuzz
548 550 old = h.old()
549 551 # patch starts counting at 1 unless we are adding the file
550 552 if h.starta == 0:
551 553 start = 0
552 554 else:
553 555 start = h.starta + self.offset - 1
554 556 orig_start = start
555 557 # if there's skew we want to emit the "(offset %d lines)" even
556 558 # when the hunk cleanly applies at start + skew, so skip the
557 559 # fast case code
558 560 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
559 561 if h.rmfile():
560 562 self.unlink(self.fname)
561 563 else:
562 564 self.lines[start : start + h.lena] = h.new()
563 565 self.offset += h.lenb - h.lena
564 566 self.dirty = 1
565 567 return 0
566 568
567 569 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
568 570 self.hashlines()
569 571 if h.hunk[-1][0] != ' ':
570 572 # if the hunk tried to put something at the bottom of the file
571 573 # override the start line and use eof here
572 574 search_start = len(self.lines)
573 575 else:
574 576 search_start = orig_start + self.skew
575 577
576 578 for fuzzlen in xrange(3):
577 579 for toponly in [True, False]:
578 580 old = h.old(fuzzlen, toponly)
579 581
580 582 cand = self.findlines(old[0][1:], search_start)
581 583 for l in cand:
582 584 if diffhelpers.testhunk(old, self.lines, l) == 0:
583 585 newlines = h.new(fuzzlen, toponly)
584 586 self.lines[l : l + len(old)] = newlines
585 587 self.offset += len(newlines) - len(old)
586 588 self.skew = l - orig_start
587 589 self.dirty = 1
588 590 offset = l - orig_start - fuzzlen
589 591 if fuzzlen:
590 592 msg = _("Hunk #%d succeeded at %d "
591 593 "with fuzz %d "
592 594 "(offset %d lines).\n")
593 595 self.printfile(True)
594 596 self.ui.warn(msg %
595 597 (h.number, l + 1, fuzzlen, offset))
596 598 else:
597 599 msg = _("Hunk #%d succeeded at %d "
598 600 "(offset %d lines).\n")
599 601 self.ui.note(msg % (h.number, l + 1, offset))
600 602 return fuzzlen
601 603 self.printfile(True)
602 604 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
603 605 self.rej.append(horig)
604 606 return -1
605 607
606 608 class hunk(object):
607 609 def __init__(self, desc, num, lr, context, create=False, remove=False):
608 610 self.number = num
609 611 self.desc = desc
610 612 self.hunk = [desc]
611 613 self.a = []
612 614 self.b = []
613 615 self.starta = self.lena = None
614 616 self.startb = self.lenb = None
615 617 if lr is not None:
616 618 if context:
617 619 self.read_context_hunk(lr)
618 620 else:
619 621 self.read_unified_hunk(lr)
620 622 self.create = create
621 623 self.remove = remove and not create
622 624
623 625 def getnormalized(self):
624 626 """Return a copy with line endings normalized to LF."""
625 627
626 628 def normalize(lines):
627 629 nlines = []
628 630 for line in lines:
629 631 if line.endswith('\r\n'):
630 632 line = line[:-2] + '\n'
631 633 nlines.append(line)
632 634 return nlines
633 635
634 636 # Dummy object, it is rebuilt manually
635 637 nh = hunk(self.desc, self.number, None, None, False, False)
636 638 nh.number = self.number
637 639 nh.desc = self.desc
638 640 nh.hunk = self.hunk
639 641 nh.a = normalize(self.a)
640 642 nh.b = normalize(self.b)
641 643 nh.starta = self.starta
642 644 nh.startb = self.startb
643 645 nh.lena = self.lena
644 646 nh.lenb = self.lenb
645 647 nh.create = self.create
646 648 nh.remove = self.remove
647 649 return nh
648 650
649 651 def read_unified_hunk(self, lr):
650 652 m = unidesc.match(self.desc)
651 653 if not m:
652 654 raise PatchError(_("bad hunk #%d") % self.number)
653 655 self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
654 656 if self.lena is None:
655 657 self.lena = 1
656 658 else:
657 659 self.lena = int(self.lena)
658 660 if self.lenb is None:
659 661 self.lenb = 1
660 662 else:
661 663 self.lenb = int(self.lenb)
662 664 self.starta = int(self.starta)
663 665 self.startb = int(self.startb)
664 666 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
665 667 # if we hit eof before finishing out the hunk, the last line will
666 668 # be zero length. Lets try to fix it up.
667 669 while len(self.hunk[-1]) == 0:
668 670 del self.hunk[-1]
669 671 del self.a[-1]
670 672 del self.b[-1]
671 673 self.lena -= 1
672 674 self.lenb -= 1
673 675
674 676 def read_context_hunk(self, lr):
675 677 self.desc = lr.readline()
676 678 m = contextdesc.match(self.desc)
677 679 if not m:
678 680 raise PatchError(_("bad hunk #%d") % self.number)
679 681 foo, self.starta, foo2, aend, foo3 = m.groups()
680 682 self.starta = int(self.starta)
681 683 if aend is None:
682 684 aend = self.starta
683 685 self.lena = int(aend) - self.starta
684 686 if self.starta:
685 687 self.lena += 1
686 688 for x in xrange(self.lena):
687 689 l = lr.readline()
688 690 if l.startswith('---'):
689 691 # lines addition, old block is empty
690 692 lr.push(l)
691 693 break
692 694 s = l[2:]
693 695 if l.startswith('- ') or l.startswith('! '):
694 696 u = '-' + s
695 697 elif l.startswith(' '):
696 698 u = ' ' + s
697 699 else:
698 700 raise PatchError(_("bad hunk #%d old text line %d") %
699 701 (self.number, x))
700 702 self.a.append(u)
701 703 self.hunk.append(u)
702 704
703 705 l = lr.readline()
704 706 if l.startswith('\ '):
705 707 s = self.a[-1][:-1]
706 708 self.a[-1] = s
707 709 self.hunk[-1] = s
708 710 l = lr.readline()
709 711 m = contextdesc.match(l)
710 712 if not m:
711 713 raise PatchError(_("bad hunk #%d") % self.number)
712 714 foo, self.startb, foo2, bend, foo3 = m.groups()
713 715 self.startb = int(self.startb)
714 716 if bend is None:
715 717 bend = self.startb
716 718 self.lenb = int(bend) - self.startb
717 719 if self.startb:
718 720 self.lenb += 1
719 721 hunki = 1
720 722 for x in xrange(self.lenb):
721 723 l = lr.readline()
722 724 if l.startswith('\ '):
723 725 # XXX: the only way to hit this is with an invalid line range.
724 726 # The no-eol marker is not counted in the line range, but I
725 727 # guess there are diff(1) out there which behave differently.
726 728 s = self.b[-1][:-1]
727 729 self.b[-1] = s
728 730 self.hunk[hunki - 1] = s
729 731 continue
730 732 if not l:
731 733 # line deletions, new block is empty and we hit EOF
732 734 lr.push(l)
733 735 break
734 736 s = l[2:]
735 737 if l.startswith('+ ') or l.startswith('! '):
736 738 u = '+' + s
737 739 elif l.startswith(' '):
738 740 u = ' ' + s
739 741 elif len(self.b) == 0:
740 742 # line deletions, new block is empty
741 743 lr.push(l)
742 744 break
743 745 else:
744 746 raise PatchError(_("bad hunk #%d old text line %d") %
745 747 (self.number, x))
746 748 self.b.append(s)
747 749 while True:
748 750 if hunki >= len(self.hunk):
749 751 h = ""
750 752 else:
751 753 h = self.hunk[hunki]
752 754 hunki += 1
753 755 if h == u:
754 756 break
755 757 elif h.startswith('-'):
756 758 continue
757 759 else:
758 760 self.hunk.insert(hunki - 1, u)
759 761 break
760 762
761 763 if not self.a:
762 764 # this happens when lines were only added to the hunk
763 765 for x in self.hunk:
764 766 if x.startswith('-') or x.startswith(' '):
765 767 self.a.append(x)
766 768 if not self.b:
767 769 # this happens when lines were only deleted from the hunk
768 770 for x in self.hunk:
769 771 if x.startswith('+') or x.startswith(' '):
770 772 self.b.append(x[1:])
771 773 # @@ -start,len +start,len @@
772 774 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
773 775 self.startb, self.lenb)
774 776 self.hunk[0] = self.desc
775 777
776 778 def fix_newline(self):
777 779 diffhelpers.fix_newline(self.hunk, self.a, self.b)
778 780
779 781 def complete(self):
780 782 return len(self.a) == self.lena and len(self.b) == self.lenb
781 783
782 784 def createfile(self):
783 785 return self.starta == 0 and self.lena == 0 and self.create
784 786
785 787 def rmfile(self):
786 788 return self.startb == 0 and self.lenb == 0 and self.remove
787 789
788 790 def fuzzit(self, l, fuzz, toponly):
789 791 # this removes context lines from the top and bottom of list 'l'. It
790 792 # checks the hunk to make sure only context lines are removed, and then
791 793 # returns a new shortened list of lines.
792 794 fuzz = min(fuzz, len(l)-1)
793 795 if fuzz:
794 796 top = 0
795 797 bot = 0
796 798 hlen = len(self.hunk)
797 799 for x in xrange(hlen - 1):
798 800 # the hunk starts with the @@ line, so use x+1
799 801 if self.hunk[x + 1][0] == ' ':
800 802 top += 1
801 803 else:
802 804 break
803 805 if not toponly:
804 806 for x in xrange(hlen - 1):
805 807 if self.hunk[hlen - bot - 1][0] == ' ':
806 808 bot += 1
807 809 else:
808 810 break
809 811
810 812 # top and bot now count context in the hunk
811 813 # adjust them if either one is short
812 814 context = max(top, bot, 3)
813 815 if bot < context:
814 816 bot = max(0, fuzz - (context - bot))
815 817 else:
816 818 bot = min(fuzz, bot)
817 819 if top < context:
818 820 top = max(0, fuzz - (context - top))
819 821 else:
820 822 top = min(fuzz, top)
821 823
822 824 return l[top:len(l)-bot]
823 825 return l
824 826
825 827 def old(self, fuzz=0, toponly=False):
826 828 return self.fuzzit(self.a, fuzz, toponly)
827 829
828 830 def new(self, fuzz=0, toponly=False):
829 831 return self.fuzzit(self.b, fuzz, toponly)
830 832
831 833 class binhunk:
832 834 'A binary patch file. Only understands literals so far.'
833 835 def __init__(self, gitpatch):
834 836 self.gitpatch = gitpatch
835 837 self.text = None
836 838 self.hunk = ['GIT binary patch\n']
837 839
838 840 def createfile(self):
839 841 return self.gitpatch.op in ('ADD', 'RENAME', 'COPY')
840 842
841 843 def rmfile(self):
842 844 return self.gitpatch.op == 'DELETE'
843 845
844 846 def complete(self):
845 847 return self.text is not None
846 848
847 849 def new(self):
848 850 return [self.text]
849 851
850 852 def extract(self, lr):
851 853 line = lr.readline()
852 854 self.hunk.append(line)
853 855 while line and not line.startswith('literal '):
854 856 line = lr.readline()
855 857 self.hunk.append(line)
856 858 if not line:
857 859 raise PatchError(_('could not extract binary patch'))
858 860 size = int(line[8:].rstrip())
859 861 dec = []
860 862 line = lr.readline()
861 863 self.hunk.append(line)
862 864 while len(line) > 1:
863 865 l = line[0]
864 866 if l <= 'Z' and l >= 'A':
865 867 l = ord(l) - ord('A') + 1
866 868 else:
867 869 l = ord(l) - ord('a') + 27
868 870 dec.append(base85.b85decode(line[1:-1])[:l])
869 871 line = lr.readline()
870 872 self.hunk.append(line)
871 873 text = zlib.decompress(''.join(dec))
872 874 if len(text) != size:
873 875 raise PatchError(_('binary patch is %d bytes, not %d') %
874 876 len(text), size)
875 877 self.text = text
876 878
877 879 def parsefilename(str):
878 880 # --- filename \t|space stuff
879 881 s = str[4:].rstrip('\r\n')
880 882 i = s.find('\t')
881 883 if i < 0:
882 884 i = s.find(' ')
883 885 if i < 0:
884 886 return s
885 887 return s[:i]
886 888
887 889 def pathstrip(path, strip):
888 890 pathlen = len(path)
889 891 i = 0
890 892 if strip == 0:
891 893 return '', path.rstrip()
892 894 count = strip
893 895 while count > 0:
894 896 i = path.find('/', i)
895 897 if i == -1:
896 898 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
897 899 (count, strip, path))
898 900 i += 1
899 901 # consume '//' in the path
900 902 while i < pathlen - 1 and path[i] == '/':
901 903 i += 1
902 904 count -= 1
903 905 return path[:i].lstrip(), path[i:].rstrip()
904 906
905 907 def selectfile(afile_orig, bfile_orig, hunk, strip):
906 908 nulla = afile_orig == "/dev/null"
907 909 nullb = bfile_orig == "/dev/null"
908 910 abase, afile = pathstrip(afile_orig, strip)
909 911 gooda = not nulla and os.path.lexists(afile)
910 912 bbase, bfile = pathstrip(bfile_orig, strip)
911 913 if afile == bfile:
912 914 goodb = gooda
913 915 else:
914 916 goodb = not nullb and os.path.lexists(bfile)
915 917 createfunc = hunk.createfile
916 918 missing = not goodb and not gooda and not createfunc()
917 919
918 920 # some diff programs apparently produce patches where the afile is
919 921 # not /dev/null, but afile starts with bfile
920 922 abasedir = afile[:afile.rfind('/') + 1]
921 923 bbasedir = bfile[:bfile.rfind('/') + 1]
922 924 if missing and abasedir == bbasedir and afile.startswith(bfile):
923 925 # this isn't very pretty
924 926 hunk.create = True
925 927 if createfunc():
926 928 missing = False
927 929 else:
928 930 hunk.create = False
929 931
930 932 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
931 933 # diff is between a file and its backup. In this case, the original
932 934 # file should be patched (see original mpatch code).
933 935 isbackup = (abase == bbase and bfile.startswith(afile))
934 936 fname = None
935 937 if not missing:
936 938 if gooda and goodb:
937 939 fname = isbackup and afile or bfile
938 940 elif gooda:
939 941 fname = afile
940 942
941 943 if not fname:
942 944 if not nullb:
943 945 fname = isbackup and afile or bfile
944 946 elif not nulla:
945 947 fname = afile
946 948 else:
947 949 raise PatchError(_("undefined source and destination files"))
948 950
949 951 return fname, missing
950 952
951 953 def scangitpatch(lr, firstline):
952 954 """
953 955 Git patches can emit:
954 956 - rename a to b
955 957 - change b
956 958 - copy a to c
957 959 - change c
958 960
959 961 We cannot apply this sequence as-is, the renamed 'a' could not be
960 962 found for it would have been renamed already. And we cannot copy
961 963 from 'b' instead because 'b' would have been changed already. So
962 964 we scan the git patch for copy and rename commands so we can
963 965 perform the copies ahead of time.
964 966 """
965 967 pos = 0
966 968 try:
967 969 pos = lr.fp.tell()
968 970 fp = lr.fp
969 971 except IOError:
970 972 fp = cStringIO.StringIO(lr.fp.read())
971 973 gitlr = linereader(fp, lr.textmode)
972 974 gitlr.push(firstline)
973 975 gitpatches = readgitpatch(gitlr)
974 976 fp.seek(pos)
975 977 return gitpatches
976 978
def iterhunks(ui, fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    changed = {}          # git path -> gitpatch record for the current patch
    current_hunk = None   # hunk being accumulated; flushed before next event
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = False      # a 'file' event is pending until the first hunk
    git = False

    # our states
    BFILE = 1
    # context stays None until the diff flavor is known: True for
    # context diffs, False for unified diffs.
    context = None
    lr = linereader(fp)

    while True:
        newfile = newgitfile = False
        x = lr.readline()
        if not x:
            break
        if current_hunk:
            if x.startswith('\ '):
                # "\ No newline at end of file" amends the previous hunk
                current_hunk.fix_newline()
            yield 'hunk', current_hunk
            current_hunk = None
        if (state == BFILE and ((not context and x[0] == '@') or
            ((context is not False) and x.startswith('***************')))):
            if context is None and x.startswith('***************'):
                context = True
            gpatch = changed.get(bfile)
            # /dev/null on either side marks creation/deletion, as do
            # the corresponding git operations
            create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD'
            remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE'
            current_hunk = hunk(x, hunknum + 1, lr, context, create, remove)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, current_hunk)
        elif state == BFILE and x.startswith('GIT binary patch'):
            current_hunk = binhunk(changed[bfile])
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', ('a/' + afile, 'b/' + bfile, current_hunk)
            current_hunk.extract(lr)
        elif x.startswith('diff --git'):
            # check for git diff, scanning the whole patch file if needed
            m = gitre.match(x)
            if m:
                afile, bfile = m.group(1, 2)
                if not git:
                    git = True
                    gitpatches = scangitpatch(lr, x)
                    yield 'git', gitpatches
                    for gp in gitpatches:
                        changed[gp.path] = gp
            # else error?
            # copy/rename + modify should modify target, not source
            gp = changed.get(bfile)
            if gp and (gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD')
                       or gp.mode):
                afile = bfile
            newgitfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newgitfile or newfile:
            emitfile = True
            state = BFILE
            hunknum = 0
    if current_hunk:
        # flush the trailing hunk; an incomplete one means a truncated patch
        if current_hunk.complete():
            yield 'hunk', current_hunk
        else:
            raise PatchError(_("malformed patch %s %s") % (afile,
                             current_hunk.desc))
1082 1084
def applydiff(ui, fp, changed, strip=1, eolmode='strict'):
    """Apply the patch read from fp to the working directory.

    Every filename touched by the patch is recorded as a key of
    'changed'. Returns 0 for a clean apply, 1 if any hunk applied
    with fuzz, and -1 if any hunk was rejected.

    With eolmode 'strict', the patch content and patched files are
    handled in binary mode; any other mode ignores line endings while
    matching and then normalizes them according to 'eolmode'.

    Callers probably want to call 'cmdutil.updatedir' after this to
    apply certain categories of changes not done by this function.
    """
    return _applydiff(ui, fp, patchfile, copyfile, changed,
                      strip=strip, eolmode=eolmode)
1099 1101
1100 1102 def _applydiff(ui, fp, patcher, copyfn, changed, strip=1, eolmode='strict'):
1101 1103 rejects = 0
1102 1104 err = 0
1103 1105 current_file = None
1104 1106 cwd = os.getcwd()
1105 1107 opener = util.opener(cwd)
1106 1108
1107 1109 def closefile():
1108 1110 if not current_file:
1109 1111 return 0
1110 1112 if current_file.dirty:
1111 1113 current_file.writelines(current_file.fname, current_file.lines)
1112 1114 current_file.write_rej()
1113 1115 return len(current_file.rej)
1114 1116
1115 1117 for state, values in iterhunks(ui, fp):
1116 1118 if state == 'hunk':
1117 1119 if not current_file:
1118 1120 continue
1119 1121 ret = current_file.apply(values)
1120 1122 if ret >= 0:
1121 1123 changed.setdefault(current_file.fname, None)
1122 1124 if ret > 0:
1123 1125 err = 1
1124 1126 elif state == 'file':
1125 1127 rejects += closefile()
1126 1128 afile, bfile, first_hunk = values
1127 1129 try:
1128 1130 current_file, missing = selectfile(afile, bfile,
1129 1131 first_hunk, strip)
1130 1132 current_file = patcher(ui, current_file, opener,
1131 1133 missing=missing, eolmode=eolmode)
1132 1134 except PatchError, err:
1133 1135 ui.warn(str(err) + '\n')
1134 1136 current_file = None
1135 1137 rejects += 1
1136 1138 continue
1137 1139 elif state == 'git':
1138 1140 for gp in values:
1139 1141 gp.path = pathstrip(gp.path, strip - 1)[1]
1140 1142 if gp.oldpath:
1141 1143 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1142 1144 # Binary patches really overwrite target files, copying them
1143 1145 # will just make it fails with "target file exists"
1144 1146 if gp.op in ('COPY', 'RENAME') and not gp.binary:
1145 1147 copyfn(gp.oldpath, gp.path, cwd)
1146 1148 changed[gp.path] = gp
1147 1149 else:
1148 1150 raise util.Abort(_('unsupported parser state: %s') % state)
1149 1151
1150 1152 rejects += closefile()
1151 1153
1152 1154 if rejects:
1153 1155 return -1
1154 1156 return err
1155 1157
def externalpatch(patcher, patchname, ui, strip, cwd, files):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    # Previously 'pf'/'printed_file' were only assigned once a
    # "patching file" line had been seen; a fuzz/FAILED line appearing
    # first raised UnboundLocalError. Default them up front.
    pf = patchname
    printed_file = False
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))

    for line in fp:
        line = line.rstrip()
        ui.note(line + '\n')
        if line.startswith('patching file '):
            pf = util.parse_patch_output(line)
            printed_file = False
            files.setdefault(pf, None)
        elif line.find('with fuzz') >= 0:
            fuzz = True
            if not printed_file:
                ui.warn(pf + '\n')
                printed_file = True
            ui.warn(line + '\n')
        elif line.find('saving rejects to file') >= 0:
            ui.warn(line + '\n')
        elif line.find('FAILED') >= 0:
            if not printed_file:
                ui.warn(pf + '\n')
                printed_file = True
            ui.warn(line + '\n')
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explain_exit(code)[0])
    return fuzz
1192 1194
def internalpatch(patchobj, ui, strip, cwd, files=None, eolmode='strict'):
    """Apply 'patchobj' to the working directory with the builtin patcher.

    Returns whether the patch was applied with fuzz factor.
    """
    if files is None:
        files = {}
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    # 'patchobj' may be a path or an already-open file-like object.
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj

    curdir = None
    if cwd:
        curdir = os.getcwd()
        os.chdir(cwd)
    try:
        ret = applydiff(ui, fp, files, strip=strip, eolmode=eolmode)
    finally:
        if curdir is not None:
            os.chdir(curdir)
        if fp is not patchobj:
            fp.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
1222 1224
1223 1225 def patch(patchname, ui, strip=1, cwd=None, files=None, eolmode='strict'):
1224 1226 """Apply <patchname> to the working directory.
1225 1227
1226 1228 'eolmode' specifies how end of lines should be handled. It can be:
1227 1229 - 'strict': inputs are read in binary mode, EOLs are preserved
1228 1230 - 'crlf': EOLs are ignored when patching and reset to CRLF
1229 1231 - 'lf': EOLs are ignored when patching and reset to LF
1230 1232 - None: get it from user settings, default to 'strict'
1231 1233 'eolmode' is ignored when using an external patcher program.
1232 1234
1233 1235 Returns whether patch was applied with fuzz factor.
1234 1236 """
1235 1237 patcher = ui.config('ui', 'patch')
1236 1238 if files is None:
1237 1239 files = {}
1238 1240 try:
1239 1241 if patcher:
1240 1242 return externalpatch(patcher, patchname, ui, strip, cwd, files)
1241 1243 return internalpatch(patchname, ui, strip, cwd, files, eolmode)
1242 1244 except PatchError, err:
1243 1245 raise util.Abort(str(err))
1244 1246
def b85diff(to, tn):
    '''print base85-encoded binary diff'''
    def gitindex(text):
        # git blob id: sha1 of "blob <len>\0<data>"; empty content maps
        # to the null id
        if not text:
            return hex(nullid)
        l = len(text)
        s = util.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    def fmtline(line):
        # each output line starts with a char encoding the payload
        # length: 'A'-'Z' for 1-26 bytes, 'a'-'z' for 27-52 bytes
        l = len(line)
        if l <= 26:
            l = chr(ord('A') + l - 1)
        else:
            l = chr(l - 26 + ord('a') - 1)
        return '%c%s\n' % (l, base85.b85encode(line, True))

    def chunk(text, csize=52):
        # split the compressed payload into 52-byte pieces
        l = len(text)
        i = 0
        while i < l:
            yield text[i:i + csize]
            i += csize

    tohash = gitindex(to)
    tnhash = gitindex(tn)
    if tohash == tnhash:
        # identical content: nothing to emit
        return ""

    # TODO: deltas
    ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
           (tohash, tnhash, len(tn))]
    for l in chunk(zlib.compress(tn)):
        ret.append(fmtline(l))
    ret.append('\n')
    return ''.join(ret)
1282 1284
class GitDiffRequired(Exception):
    """Internal signal raised when a change cannot be represented in
    plain diff format and the diff must be regenerated as a git diff
    (see diff() and its 'losedata' callback)."""
    pass
1285 1287
def diffopts(ui, opts=None, untrusted=False):
    """Build an mdiff.diffopts from command options and the [diff]
    configuration section; explicit command options take precedence."""
    def get(key, name=None, getter=ui.configbool):
        if opts:
            value = opts.get(key)
            if value:
                return value
        return getter('diff', name or key, None, untrusted=untrusted)
    return mdiff.diffopts(
        text=opts and opts.get('text'),
        git=get('git'),
        nodates=get('nodates'),
        showfunc=get('show_function', 'showfunc'),
        ignorews=get('ignore_all_space', 'ignorews'),
        ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
        ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
        context=get('unified', getter=ui.config))
1299 1301
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).
    '''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        # default to diffing against the first working-dir parent
        node1 = repo.dirstate.parents()[0]

    def lrugetfilectx():
        # small LRU cache of filelogs, keyed by filename, so repeated
        # lookups of the same file do not reopen its revlog
        cache = {}
        order = []
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    # evict the least recently used entry
                    del cache[order.pop(0)]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    revs = None
    if not repo.ui.quiet:
        hexfunc = repo.ui.debugflag and hex or short
        revs = [hexfunc(node) for node in [node1, node2] if node]

    copy = {}
    if opts.git or opts.upgrade:
        # copy/rename information is only needed for git-style diffs
        copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]

    difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
                modified, added, removed, copy, getfilectx, opts, losedata, prefix)
    if opts.upgrade and not opts.git:
        # try a plain diff first; regenerate as a git diff if any
        # change would lose data and the callback does not accept that
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired()
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
1374 1376
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # map line prefixes to color labels; first match wins, so longer
    # prefixes ('---', '+++') come before '-' and '+'
    _labels = (('diff', 'diff.diffline'),
               ('copy', 'diff.extended'),
               ('rename', 'diff.extended'),
               ('old', 'diff.extended'),
               ('new', 'diff.extended'),
               ('deleted', 'diff.extended'),
               ('---', 'diff.file_a'),
               ('+++', 'diff.file_b'),
               ('@@', 'diff.hunk'),
               ('-', 'diff.deleted'),
               ('+', 'diff.inserted'))

    for chunk in func(*args, **kw):
        pieces = chunk.split('\n')
        for i, line in enumerate(pieces):
            if i:
                yield ('\n', '')
            body = line
            if line and line[0] in '+-':
                # highlight trailing whitespace, but only in changed lines
                body = line.rstrip()
            for prefix, label in _labels:
                if body.startswith(prefix):
                    yield (body, label)
                    break
            else:
                yield (line, '')
            if line != body:
                yield (line[len(body):], 'diff.trailingwhitespace')
1406 1408
def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # thin wrapper: run diff() output through difflabel() to attach
    # color labels per line
    return difflabel(diff, *args, **kw)
1410 1412
1411 1413
1412 1414 def _addmodehdr(header, omode, nmode):
1413 1415 if omode != nmode:
1414 1416 header.append('old mode %s\n' % omode)
1415 1417 header.append('new mode %s\n' % nmode)
1416 1418
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix):
    # Generator producing the diff text for each changed file. When a
    # change cannot be represented in plain diff format and opts.git is
    # off, 'losedatafn' is called (see diff()).

    def join(f):
        # prepend the subrepo display prefix
        return os.path.join(prefix, f)

    date1 = util.datestr(ctx1.date())
    man1 = ctx1.manifest()

    gone = set()    # rename sources already reported
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    # reverse mapping: copy destination -> source
    copyto = dict([(v, k) for k, v in copy.items()])

    if opts.git:
        revs = None

    for f in sorted(modified + added + removed):
        to = None       # old content
        tn = None       # new content
        dodiff = True
        header = []
        if f in man1:
            to = getfilectx(f, ctx1).data()
        if f not in removed:
            tn = getfilectx(f, ctx2).data()
        a, b = f, f
        if opts.git or losedatafn:
            if f in added:
                mode = gitmode[ctx2.flags(f)]
                if f in copy or f in copyto:
                    # added via copy/rename
                    if opts.git:
                        if f in copy:
                            a = copy[f]
                        else:
                            a = copyto[f]
                        omode = gitmode[man1.flags(a)]
                        _addmodehdr(header, omode, mode)
                        if a in removed and a not in gone:
                            op = 'rename'
                            gone.add(a)
                        else:
                            op = 'copy'
                        header.append('%s from %s\n' % (op, join(a)))
                        header.append('%s to %s\n' % (op, join(f)))
                        to = getfilectx(a, ctx1).data()
                    else:
                        losedatafn(f)
                else:
                    if opts.git:
                        header.append('new file mode %s\n' % mode)
                    elif ctx2.flags(f):
                        # plain diffs cannot express the file mode
                        losedatafn(f)
                # In theory, if tn was copied or renamed we should check
                # if the source is binary too but the copy record already
                # forces git mode.
                if util.binary(tn):
                    if opts.git:
                        dodiff = 'binary'
                    else:
                        losedatafn(f)
                if not opts.git and not tn:
                    # regular diffs cannot represent new empty file
                    losedatafn(f)
            elif f in removed:
                if opts.git:
                    # have we already reported a copy above?
                    if ((f in copy and copy[f] in added
                         and copyto[copy[f]] == f) or
                        (f in copyto and copyto[f] in added
                         and copy[copyto[f]] == f)):
                        dodiff = False
                    else:
                        header.append('deleted file mode %s\n' %
                                      gitmode[man1.flags(f)])
                elif not to or util.binary(to):
                    # regular diffs cannot represent empty file deletion
                    losedatafn(f)
            else:
                # modified file
                oflag = man1.flags(f)
                nflag = ctx2.flags(f)
                binary = util.binary(to) or util.binary(tn)
                if opts.git:
                    _addmodehdr(header, gitmode[oflag], gitmode[nflag])
                    if binary:
                        dodiff = 'binary'
                elif binary or nflag != oflag:
                    # plain diffs cannot express binary changes or flag
                    # changes
                    losedatafn(f)
        if opts.git:
            header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))

        if dodiff:
            if dodiff == 'binary':
                text = b85diff(to, tn)
            else:
                text = mdiff.unidiff(to, date1,
                                     # ctx2 date may be dynamic
                                     tn, util.datestr(ctx2.date()),
                                     join(a), join(b), revs, opts=opts)
            if header and (text or len(header) > 1):
                yield ''.join(header)
            if text:
                yield text
1520 1522
def diffstatdata(lines):
    """Parse diff text and generate one (filename, adds, removes,
    isbinary) tuple per file."""
    filename = None
    adds = removes = 0

    def entry():
        # a file with no +/- lines is assumed to be binary
        return (filename, adds, removes, adds == 0 and removes == 0)

    for line in lines:
        if line.startswith('diff'):
            if filename:
                yield entry()
            # set numbers to 0 anyway when starting new file
            adds = removes = 0
            if line.startswith('diff --git'):
                filename = gitre.search(line).group(1)
            else:
                # format: "diff -r ... -r ... filename"
                filename = line.split(None, 5)[-1]
        elif line.startswith('+') and not line.startswith('+++'):
            adds += 1
        elif line.startswith('-') and not line.startswith('---'):
            removes += 1
    if filename:
        yield entry()
1542 1544
def diffstat(lines, width=80, git=False):
    """Render a diffstat summary of 'lines' (diff text) as a string,
    with one histogram row per file plus a totals line. With git=True,
    binary files are shown as 'Bin' instead of a count."""
    output = []
    stats = list(diffstatdata(lines))

    maxtotal, maxname = 0, 0
    totaladds, totalremoves = 0, 0
    hasbinary = False

    # precompute display widths; colwidth accounts for wide characters
    sized = [(filename, adds, removes, isbinary, encoding.colwidth(filename))
             for filename, adds, removes, isbinary in stats]

    for filename, adds, removes, isbinary, namewidth in sized:
        totaladds += adds
        totalremoves += removes
        maxname = max(maxname, namewidth)
        maxtotal = max(maxtotal, adds + removes)
        if isbinary:
            hasbinary = True

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        # make room for the 'Bin' marker
        countwidth = 3
    # remaining room for the +/- histogram after name, count, separators
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary, namewidth in sized:
        if git and isbinary:
            count = 'Bin'
        else:
            count = adds + removes
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - namewidth),
                       countwidth, count,
                       pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
1594 1596
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            # histogram row: split off the +/- graph and label its parts
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            for pattern, label in ((r'\++', 'diffstat.inserted'),
                                   (r'-+', 'diffstat.deleted')):
                m = re.search(pattern, graph)
                if m:
                    yield (m.group(0), label)
        else:
            yield (line, '')
        yield ('\n', '')
@@ -1,143 +1,207 b''
1 1
2 2 Test interactions between mq and patch.eol
3 3
4 4
5 5 $ echo "[extensions]" >> $HGRCPATH
6 6 $ echo "mq=" >> $HGRCPATH
7 7 $ echo "[diff]" >> $HGRCPATH
8 8 $ echo "nodates=1" >> $HGRCPATH
9 9
10 10 $ cat > makepatch.py <<EOF
11 11 > f = file('eol.diff', 'wb')
12 12 > w = f.write
13 13 > w('test message\n')
14 14 > w('diff --git a/a b/a\n')
15 15 > w('--- a/a\n')
16 16 > w('+++ b/a\n')
17 17 > w('@@ -1,5 +1,5 @@\n')
18 18 > w(' a\n')
19 19 > w('-b\r\n')
20 20 > w('+y\r\n')
21 21 > w(' c\r\n')
22 22 > w(' d\n')
23 23 > w('-e\n')
24 24 > w('\ No newline at end of file\n')
25 25 > w('+z\r\n')
26 26 > w('\ No newline at end of file\r\n')
27 27 > EOF
28 28
29 29 $ cat > cateol.py <<EOF
30 30 > import sys
31 31 > for line in file(sys.argv[1], 'rb'):
32 32 > line = line.replace('\r', '<CR>')
33 33 > line = line.replace('\n', '<LF>')
34 34 > print line
35 35 > EOF
36 36
37 37 $ hg init repo
38 38 $ cd repo
39 39 $ echo '\.diff' > .hgignore
40 40 $ echo '\.rej' >> .hgignore
41 41
42 42
43 43 Test different --eol values
44 44
45 45 $ python -c 'file("a", "wb").write("a\nb\nc\nd\ne")'
46 46 $ hg ci -Am adda
47 47 adding .hgignore
48 48 adding a
49 49 $ python ../makepatch.py
50 50 $ hg qimport eol.diff
51 51 adding eol.diff to series file
52 52
53 53 should fail in strict mode
54 54
55 55 $ hg qpush
56 56 applying eol.diff
57 57 patching file a
58 58 Hunk #1 FAILED at 0
59 59 1 out of 1 hunks FAILED -- saving rejects to file a.rej
60 60 patch failed, unable to continue (try -v)
61 61 patch failed, rejects left in working dir
62 62 errors during apply, please fix and refresh eol.diff
63 63 [2]
64 64 $ hg qpop
65 65 popping eol.diff
66 66 patch queue now empty
67 67
68 68 invalid eol
69 69
70 70 $ hg --config patch.eol='LFCR' qpush
71 71 applying eol.diff
72 72 patch failed, unable to continue (try -v)
73 73 patch failed, rejects left in working dir
74 74 errors during apply, please fix and refresh eol.diff
75 75 [2]
76 76 $ hg qpop
77 77 popping eol.diff
78 78 patch queue now empty
79 79
80 80 force LF
81 81
82 82 $ hg --config patch.eol='CRLF' qpush
83 83 applying eol.diff
84 84 now at: eol.diff
85 85 $ hg qrefresh
86 86 $ python ../cateol.py .hg/patches/eol.diff
87 87 test message<LF>
88 88 <LF>
89 89 diff -r 0d0bf99a8b7a a<LF>
90 90 --- a/a<LF>
91 91 +++ b/a<LF>
92 92 @@ -1,5 +1,5 @@<LF>
93 93 -a<LF>
94 94 -b<LF>
95 95 -c<LF>
96 96 -d<LF>
97 97 -e<LF>
98 98 \ No newline at end of file<LF>
99 99 +a<CR><LF>
100 100 +y<CR><LF>
101 101 +c<CR><LF>
102 102 +d<CR><LF>
103 103 +z<LF>
104 104 \ No newline at end of file<LF>
105 105 $ python ../cateol.py a
106 106 a<CR><LF>
107 107 y<CR><LF>
108 108 c<CR><LF>
109 109 d<CR><LF>
110 110 z
111 111 $ hg qpop
112 112 popping eol.diff
113 113 patch queue now empty
114 114
115 115 push again forcing LF and compare revisions
116 116
117 117 $ hg --config patch.eol='CRLF' qpush
118 118 applying eol.diff
119 119 now at: eol.diff
120 120 $ python ../cateol.py a
121 121 a<CR><LF>
122 122 y<CR><LF>
123 123 c<CR><LF>
124 124 d<CR><LF>
125 125 z
126 126 $ hg qpop
127 127 popping eol.diff
128 128 patch queue now empty
129 129
130 130 push again without LF and compare revisions
131 131
132 132 $ hg qpush
133 133 applying eol.diff
134 134 now at: eol.diff
135 135 $ python ../cateol.py a
136 136 a<CR><LF>
137 137 y<CR><LF>
138 138 c<CR><LF>
139 139 d<CR><LF>
140 140 z
141 141 $ hg qpop
142 142 popping eol.diff
143 143 patch queue now empty
144 $ cd ..
145
146
147 Test .rej file EOL are left unchanged
148
149 $ hg init testeol
150 $ cd testeol
151 $ python -c "file('a', 'wb').write('1\r\n2\r\n3\r\n4')"
152 $ hg ci -Am adda
153 adding a
154 $ python -c "file('a', 'wb').write('1\r\n2\r\n33\r\n4')"
155 $ hg qnew patch1
156 $ hg qpop
157 popping patch1
158 patch queue now empty
159 $ python -c "file('a', 'wb').write('1\r\n22\r\n33\r\n4')"
160 $ hg ci -m changea
161
162 $ hg --config 'patch.eol=LF' qpush
163 applying patch1
164 patching file a
165 Hunk #1 FAILED at 0
166 1 out of 1 hunks FAILED -- saving rejects to file a.rej
167 patch failed, unable to continue (try -v)
168 patch failed, rejects left in working dir
169 errors during apply, please fix and refresh patch1
170 [2]
171 $ hg qpop
172 popping patch1
173 patch queue now empty
174 $ cat a.rej
175 --- a
176 +++ a
177 @@ -1,4 +1,4 @@
178 1\r (esc)
179 2\r (esc)
180 -3\r (esc)
181 +33\r (esc)
182 4
183 \ No newline at end of file
184
185 $ hg --config 'patch.eol=auto' qpush
186 applying patch1
187 patching file a
188 Hunk #1 FAILED at 0
189 1 out of 1 hunks FAILED -- saving rejects to file a.rej
190 patch failed, unable to continue (try -v)
191 patch failed, rejects left in working dir
192 errors during apply, please fix and refresh patch1
193 [2]
194 $ hg qpop
195 popping patch1
196 patch queue now empty
197 $ cat a.rej
198 --- a
199 +++ a
200 @@ -1,4 +1,4 @@
201 1\r (esc)
202 2\r (esc)
203 -3\r (esc)
204 +33\r (esc)
205 4
206 \ No newline at end of file
207 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now