##// END OF EJS Templates
configitems: register the 'patch.eol' config
marmoute -
r33226:dd50a370 default
parent child Browse files
Show More
@@ -1,154 +1,157 b''
1 1 # configitems.py - centralized declaration of configuration option
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11
12 12 from . import (
13 13 error,
14 14 )
15 15
def loadconfigtable(ui, extname, configtable):
    """update config item known to the ui with the extension ones

    Merges ``configtable`` (a ``{section: {name: configitem}}`` mapping
    declared by extension ``extname``) into ``ui._knownconfig``.  A
    develwarn is emitted for every item the extension overwrites, so core
    and extensions do not silently fight over the same config knob.
    """
    for section, items in configtable.items():
        knownitems = ui._knownconfig.setdefault(section, {})
        knownkeys = set(knownitems)
        newkeys = set(items)
        # warn (sorted, for deterministic output) about collisions before
        # the extension's definitions replace the existing ones
        for key in sorted(knownkeys & newkeys):
            msg = "extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config='warn-config')

        knownitems.update(items)
28 28
class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    """

    def __init__(self, section, name, default=None):
        self.section = section
        self.name = name
        # default may be a callable (e.g. ``list``) producing a fresh
        # default per lookup, or a plain value
        self.default = default
41 41
42 42 coreitems = {}
43 43
44 44 def _register(configtable, *args, **kwargs):
45 45 item = configitem(*args, **kwargs)
46 46 section = configtable.setdefault(item.section, {})
47 47 if item.name in section:
48 48 msg = "duplicated config item registration for '%s.%s'"
49 49 raise error.ProgrammingError(msg % (item.section, item.name))
50 50 section[item.name] = item
51 51
52 52 # Registering actual config items
53 53
54 54 def getitemregister(configtable):
55 55 return functools.partial(_register, configtable)
56 56
57 57 coreconfigitem = getitemregister(coreitems)
58 58
coreconfigitem('auth', 'cookiefile',
    default=None,
)
# bookmarks.pushing: internal hack for discovery
coreconfigitem('bookmarks', 'pushing',
    default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem('bundle', 'mainreporoot',
    default='',
)
# bundle.reorder: experimental config
coreconfigitem('bundle', 'reorder',
    default='auto',
)
coreconfigitem('color', 'mode',
    default='auto',
)
coreconfigitem('devel', 'all-warnings',
    default=False,
)
coreconfigitem('devel', 'bundle2.debug',
    default=False,
)
coreconfigitem('devel', 'check-locks',
    default=False,
)
coreconfigitem('devel', 'check-relroot',
    default=False,
)
coreconfigitem('devel', 'disableloaddefaultcerts',
    default=False,
)
coreconfigitem('devel', 'legacy.exchange',
    default=list,
)
coreconfigitem('devel', 'servercafile',
    default='',
)
coreconfigitem('devel', 'serverexactprotocol',
    default='',
)
coreconfigitem('devel', 'serverrequirecert',
    default=False,
)
coreconfigitem('devel', 'strip-obsmarkers',
    default=True,
)
coreconfigitem('hostsecurity', 'ciphers',
    default=None,
)
coreconfigitem('hostsecurity', 'disabletls10warning',
    default=False,
)
coreconfigitem('patch', 'eol',
    default='strict',
)
coreconfigitem('patch', 'fuzz',
    default=2,
)
coreconfigitem('server', 'bundle1',
    default=True,
)
coreconfigitem('server', 'bundle1gd',
    default=None,
)
coreconfigitem('server', 'compressionengines',
    default=list,
)
coreconfigitem('server', 'concurrent-push-mode',
    default='strict',
)
coreconfigitem('server', 'disablefullbundle',
    default=False,
)
coreconfigitem('server', 'maxhttpheaderlen',
    default=1024,
)
coreconfigitem('server', 'preferuncompressed',
    default=False,
)
coreconfigitem('server', 'uncompressedallowsecret',
    default=False,
)
coreconfigitem('server', 'validate',
    default=False,
)
coreconfigitem('server', 'zliblevel',
    default=-1,
)
coreconfigitem('ui', 'clonebundleprefers',
    default=list,
)
coreconfigitem('ui', 'interactive',
    default=None,
)
coreconfigitem('ui', 'quiet',
    default=False,
)
@@ -1,2746 +1,2746 b''
1 1 # patch.py - patch file parsing routines
2 2 #
3 3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import collections
12 12 import copy
13 13 import email
14 14 import errno
15 15 import hashlib
16 16 import os
17 17 import posixpath
18 18 import re
19 19 import shutil
20 20 import tempfile
21 21 import zlib
22 22
23 23 from .i18n import _
24 24 from .node import (
25 25 hex,
26 26 short,
27 27 )
28 28 from . import (
29 29 copies,
30 30 encoding,
31 31 error,
32 32 mail,
33 33 mdiff,
34 34 pathutil,
35 35 policy,
36 36 pycompat,
37 37 scmutil,
38 38 similar,
39 39 util,
40 40 vfs as vfsmod,
41 41 )
42 42
# C-accelerated hunk-testing helpers (with pure-python fallback)
diffhelpers = policy.importmod(r'diffhelpers')
stringio = util.stringio

gitre = re.compile(br'diff --git a/(.*) b/(.*)')
tabsplitter = re.compile(br'(\t+|[^\t]+)')

class PatchError(Exception):
    """Raised when a patch cannot be parsed or applied."""
    pass
52 52
53 53 # public functions
54 54
def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        # does this line look like an RFC-822 style "Key: value" header?
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        # split a stream of "# HG changeset patch" blocks
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split an mbox: each message starts with a "From " line; drop
        # that envelope line (cur[1:]) and recurse on the message body
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # delegate to the email package for MIME messages
        def msgfp(m):
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # hand-rolled split on header blocks (non-MIME mail-like input)
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        yield chunk(cur)

    class fiter(object):
        # adapt a readline-only object into a 'next'-style iterator
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # sniff the stream until we know which splitter applies
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
181 181
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]
188 188
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
    - filename,
    - message,
    - user,
    - date,
    - branch,
    - node,
    - p1,
    - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    data = {}
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject'] and mail.headdecode(msg['Subject'])
        data['user'] = msg['From'] and mail.headdecode(msg['From'])
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            # strip a leading "[PATCH ...]" tag and unfold the subject
            if subject.startswith('[PATCH'):
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(r'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = stringio()
                # everything before the diff start is commit message
                # (possibly with "# ..." hg export header lines mixed in)
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            hgpatchheader = False
                    elif line == '---':
                        # patchbomb-style separator: ignore trailing text
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
    if parents:
        data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        os.unlink(tmpname)
    return data
308 308
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        """set self.mode from a numeric (octal) file mode"""
        islink = mode & 0o20000
        isexec = mode & 0o100
        self.mode = (islink, isexec)

    def copy(self):
        """return an independent copy of this metadata object"""
        other = patchmeta(self.path)
        other.oldpath = self.oldpath
        other.mode = self.mode
        other.op = self.op
        other.binary = self.binary
        return other

    def _ispatchinga(self, afile):
        # does the "a/" (source) side of a hunk header match this file?
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        # does the "b/" (destination) side of a hunk header match this file?
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        """True if a hunk with the given a/b files applies to this file"""
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
354 354
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>

    Returns a list of patchmeta objects, one per "diff --git" section
    found in the line iterator ``lr``.
    """

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # start of hunk data: current file's metadata is complete
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                # last 6 chars are the octal mode, e.g. "100755"
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
398 398
class linereader(object):
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        # pushed-back lines, served before reading from fp again
        self.buf = []

    def push(self, line):
        """push a line back; it will be returned by the next readline()"""
        if line is not None:
            self.buf.append(line)

    def readline(self):
        """return the next line, preferring pushed-back lines"""
        if self.buf:
            l = self.buf[0]
            del self.buf[0]
            return l
        return self.fp.readline()

    def __iter__(self):
        # iterate until readline() returns the empty string (EOF)
        return iter(self.readline, '')
418 418
class abstractbackend(object):
    """Abstract interface for the targets patches are applied to."""

    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. total is the number of hunks
        which failed to apply and total the total number of hunks for this
        files.
        """
        # optional hook: default implementation discards rejects
        pass

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError
453 453
class fsbackend(abstractbackend):
    """Patch backend writing directly to the filesystem under basedir."""

    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        if self.opener.islink(fname):
            # symlink content is its target; mode is (islink, isexec)
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            # missing file: signal with (None, None)
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # content unchanged; only flags need updating
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        # save rejected hunks next to the target as "<fname>.rej"
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
502 502
class workingbackend(fsbackend):
    """fsbackend that also keeps the repository dirstate up to date."""

    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        # similarity threshold forwarded to scmutil.marktouched for
        # rename detection
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to patch files present on disk but unknown to the dirstate
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        """record copies/removals in the dirstate; return changed files"""
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
546 546
class filestore(object):
    """In-memory file store spilling to a temporary directory.

    Files are kept in memory until their cumulative size exceeds
    ``maxsize`` (default 4MB); later files are written to a tempdir.
    """

    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        # counter used to generate safe on-disk filenames
        self.created = 0
        self.maxsize = maxsize
        if self.maxsize is None:
            self.maxsize = 4*(2**20)
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = tempfile.mkdtemp(prefix='hg-patch-')
                self.opener = vfsmod.vfs(root)
            # Avoid filename issues with these simple names
            fn = str(self.created)
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        """return (data, mode, copied) or (None, None, None) if unknown"""
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        if self.opener:
            shutil.rmtree(self.opener.base)
583 583
class repobackend(abstractbackend):
    """Backend applying patches against a changectx, buffering in a store."""

    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # mode-only change: keep the existing content
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        """return the set of files touched (changed or removed)"""
        return self.changed | self.removed
625 625
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# supported end-of-line normalization modes for patched files
eolmodes = ['strict', 'crlf', 'lf', 'auto']
630 630
class patchfile(object):
    """State and logic for applying hunks to a single target file.

    ``gp`` is the patchmeta for the file, ``backend`` the write target,
    ``store`` a filestore holding copy sources, and ``eolmode`` one of
    ``eolmodes`` controlling end-of-line normalization.
    """

    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        # dominant EOL of the existing file content, None if unknown
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        # line content -> line numbers, used by findlines() for fuzzing
        self.hash = {}
        self.dirty = 0
        # cumulative line-count drift from previously applied hunks
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """write lines to the backend, converting EOLs per eolmode"""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        # announce the file name once, as a warning or a note
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)

    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum
        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """apply hunk ``h``; return fuzz amount used, or -1 on rejection"""
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """flush pending changes and rejects; return the reject count"""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
847 847
class header(object):
    """One file header of a patch, together with its attached hunks.

    Provides classification helpers (binary, special, new file) and the
    file name(s) the header refers to, for both git and plain diffs.
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        """Is this a git binary diff? ('index ' line present)"""
        for line in self.header:
            if line.startswith('index '):
                return True
        return False

    def pretty(self, fp):
        """Write a user-facing summary of this header to fp."""
        for line in self.header:
            if line.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(line):
                fp.write(line)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if line.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum(max(h.added, h.removed) for h in self.hunks)))
                break
            fp.write(line)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        """True when the hunks cannot be selected individually."""
        return any(self.allhunks_re.match(line) for line in self.header)

    def files(self):
        """Return the path(s) named by this header (1 or 2 entries)."""
        m = self.diffgit_re.match(self.header[0])
        if not m:
            # plain 'diff -r' headers only carry a single path
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = m.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        return any(self.newfile_re.match(line) for line in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the
        # hunk level: e.g. a deleted file must be taken or rejected as a
        # whole. A newly added file is special only while empty -- once it
        # has content, the user may still edit that content.
        nocontent = len(self.header) == 2
        if self.isnewfile() and nocontent:
            return True
        return any(self.special_re.match(line) for line in self.header)
919 919
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """
    maxcontext = 3

    def __init__(self, header, fromline, toline, proc, before, hunk, after):
        def trimcontext(lineno, lines):
            # NOTE: trimming is deliberately disabled ('False' below);
            # the code is kept so the intended behavior stays documented.
            overflow = len(lines) - self.maxcontext
            if False and overflow > 0:
                return lineno + overflow, lines[:self.maxcontext]
            return lineno, lines

        self.header = header
        self.fromline, self.before = trimcontext(fromline, before)
        self.toline, self.after = trimcontext(toline, after)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False
        return (v.hunk == self.hunk
                and v.proc == self.proc
                and self.fromline == v.fromline
                and self.header.files() == v.header.files())

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        added = sum(1 for line in hunk if line[0] == '+')
        removed = sum(1 for line in hunk if line[0] == '-')
        return added, removed

    def reversehunk(self):
        """return another recordhunk which is the reverse of the hunk

        If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
        that, swap fromline/toline and +/- signs while keep other things
        unchanged.
        """
        flip = {'+': '-', '-': '+'}
        flipped = ['%s%s' % (flip[line[0]], line[1:]) for line in self.hunk]
        return recordhunk(self.header, self.toline, self.fromline, self.proc,
                          self.before, flipped, self.after)

    def write(self, fp):
        # context before/after counts toward both sides of the @@ range;
        # a trailing no-eol marker is not a real line, so discount it
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
992 992
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks

    Walks over *headers* (header objects with their hunks attached) and
    prompts the user per file, then per hunk. *operation* selects the
    prompt wording: 'record' (default), 'discard' or 'revert'.

    Returns a pair ``(chunks, {})`` where *chunks* interleaves each kept
    header with the hunks the user accepted for it.
    """
    if operation is None:
        operation = 'record'
    # Prompt and help strings resolved once for the chosen operation.
    messages = {
        'multiple': {
            'discard': _("discard change %d/%d to '%s'?"),
            'record': _("record change %d/%d to '%s'?"),
            'revert': _("revert change %d/%d to '%s'?"),
        }[operation],
        'single': {
            'discard': _("discard this change to '%s'?"),
            'record': _("record this change to '%s'?"),
            'revert': _("revert this change to '%s'?"),
        }[operation],
        'help': {
            'discard': _('[Ynesfdaq?]'
                         '$$ &Yes, discard this change'
                         '$$ &No, skip this change'
                         '$$ &Edit this change manually'
                         '$$ &Skip remaining changes to this file'
                         '$$ Discard remaining changes to this &file'
                         '$$ &Done, skip remaining changes and files'
                         '$$ Discard &all changes to all remaining files'
                         '$$ &Quit, discarding no changes'
                         '$$ &? (display help)'),
            'record': _('[Ynesfdaq?]'
                        '$$ &Yes, record this change'
                        '$$ &No, skip this change'
                        '$$ &Edit this change manually'
                        '$$ &Skip remaining changes to this file'
                        '$$ Record remaining changes to this &file'
                        '$$ &Done, skip remaining changes and files'
                        '$$ Record &all changes to all remaining files'
                        '$$ &Quit, recording no changes'
                        '$$ &? (display help)'),
            'revert': _('[Ynesfdaq?]'
                        '$$ &Yes, revert this change'
                        '$$ &No, skip this change'
                        '$$ &Edit this change manually'
                        '$$ &Skip remaining changes to this file'
                        '$$ Revert remaining changes to this &file'
                        '$$ &Done, skip remaining changes and files'
                        '$$ Revert &all changes to all remaining files'
                        '$$ &Quit, reverting no changes'
                        '$$ &? (display help)')
        }[operation]
    }

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # An earlier 'skip file' / 'skip all' answer short-circuits here.
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = messages['help']
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, encoding.lower(t)))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                                                      suffix=".diff",
                                                      text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, pycompat.sysstr("w"))
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()},
                                    blockedtag='filterpatch')
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = stringio()
                    for line in util.iterfile(patchfp):
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise error.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {}        # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        # duplicate headers (same file appearing twice) are shown only once
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        msg = (_('examine changes to %s?') %
               _(' and ').join("'%s'" % f for f in h.files()))
        r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        # 'all or nothing' headers (binary, deletions) take every hunk
        if h.allhunks():
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = messages['single'] % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = messages['multiple'] % (idx, total, chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                                                      skipall, msg, chunk)
            if r:
                # fixoffset compensates for earlier skipped hunks so the
                # kept hunk's target line numbers stay consistent
                if fixoffset:
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                # the user edited the hunk: substitute the edited hunks
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    # keep only files that are special or have at least one accepted hunk
    return (sum([h for h in applied.itervalues()
               if h[0].special() or len(h) > 1], []), {})
class hunk(object):
    """A single text hunk parsed from a unified or context diff.

    Collects the old-side lines in ``self.a`` (with '-'/' ' prefixes),
    the new-side lines in ``self.b`` (unprefixed), and the raw lines in
    ``self.hunk``; start/length of both sides come from the hunk header.
    """
    def __init__(self, desc, num, lr, context):
        # num is the 1-based hunk number, used in error messages
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        # lr is None when building a dummy copy (see getnormalized)
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        """Parse a '@@ -s,l +s,l @@' hunk body from line reader lr."""
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        # a missing length in the header means a single line
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        """Parse a context-format ('*** / ---') hunk body from lr."""
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # read the old-side block
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': strip the trailing newline
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        # read the new-side block, merging it into self.hunk in order
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # skip over '-' lines already in self.hunk until we find the
            # insertion point for this new-side line
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # consume a trailing '\ No newline at end of file' marker, if any
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        """True when both sides contain as many lines as the header said."""
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        """Return (old, oldstart, new, newstart) with fuzz context removed."""
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1400 1400
class binhunk(object):
    """A git 'literal'/'delta' binary patch body (base85 + zlib)."""
    def __init__(self, lr, fname):
        self.text = None            # decompressed payload, set by _read
        self.delta = False          # True for 'delta' (binary delta) patches
        self.hunk = ['GIT binary patch\n']
        self._fname = fname         # target path, for error messages only
        self._read(lr)

    def complete(self):
        return self.text is not None

    def new(self, lines):
        """Return the new file content (applying the delta if needed)."""
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        def getline(lr, hunk):
            # read one raw line, record it in the hunk, return it stripped
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        # scan forward to the 'literal <size>' / 'delta <size>' marker
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # first char encodes the decoded byte count of the line:
            # 'A'..'Z' -> 1..26, 'a'..'z' -> 27..52
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1456 1456
def parsefilename(str):
    """Extract the file name from a '--- name\\t...' / '+++ name ...' line.

    The leading four characters ('--- ' or '+++ ') are dropped; the name
    ends at the first tab or, failing that, the first space.
    """
    s = str[4:].rstrip('\r\n')
    for sep in ('\t', ' '):
        cut = s.find(sep)
        if cut >= 0:
            return s[:cut]
    return s
1466 1466
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that
    is a list of the form [header1, hunk1, hunk2, header2, ...]. Headers
    have no reversehunk() method and pass through unchanged; every hunk
    object is replaced by its reverse (diff(A, B) becomes diff(B, A)).
    '''
    return [c.reversehunk() if util.safehasattr(c, 'reversehunk') else c
            for c in hunks]
1529 1529
def parsepatch(originalchunks):
    """patch -> [] of headers -> [] of hunks

    Feeds the events produced by scanpatch() through a small state
    machine and returns the resulting list of header objects, each with
    its recordhunk objects attached.
    """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0       # current old-side line number
            self.toline = 0         # current new-side line number
            self.proc = ''          # function context from the @@ line
            self.header = None      # header currently being filled
            self.context = []       # trailing context lines seen so far
            self.before = []        # context preceding the pending hunk
            self.hunk = []          # pending +/- lines
            self.headers = []       # completed headers, in order

        def addrange(self, limits):
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # context ends a pending hunk: materialize it as a recordhunk
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                               self.proc, self.before, self.hunk, context)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # the context collected so far becomes this hunk's 'before'
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            self.addcontext([])
            return self.headers

        # (current state -> event -> handler); handlers are stored as
        # plain functions, hence the explicit 'p' argument at call time
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
        }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1612 1612
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    *strip* leading path components are removed (like patch(1) -p) and
    *prefix*, if not empty (expected to be normalized with a trailing /),
    is prepended to what remains.

    Returns (stripped components, path in repository).

    >>> pathtransform('a/b/c', 0, '')
    ('', 'a/b/c')
    >>> pathtransform(' a/b/c ', 0, '')
    ('', ' a/b/c')
    >>> pathtransform(' a/b/c ', 2, '')
    ('a/b/', 'c')
    >>> pathtransform('a/b/c', 0, 'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(' a//b/c ', 2, 'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform('a/b/c', 3, '')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        return '', prefix + path.rstrip()
    pathlen = len(path)
    pos = 0
    remaining = strip
    while remaining > 0:
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # consume '//' in the path so 'a//b' strips as a single component
        while pos < pathlen - 1 and path[pos] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1650 1650
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta for a non-git hunk, guessing the target file.

    Decides between the old path (afile) and new path (bfile) using
    which of them exists in *backend*, whether either side is /dev/null
    (creation/deletion), and a couple of heuristics for odd diff tools.
    Sets gp.op to 'ADD'/'DELETE' accordingly. Raises PatchError when no
    usable file name can be determined.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back to whichever side is not /dev/null
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1705 1705
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, ''):
            if p(line):
                lines.append(line)
            else:
                # not ours: push back for the caller to re-read
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            # collect header lines up to the '---'/'+++' pair (if present)
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0] == ' ':
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1751 1751
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.

    Returns the list of gitpatch records; the reader is left positioned
    where it was when called.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable input: buffer the whole remainder in memory
        fp = stringio(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the main parsing pass sees the patch from the start
    fp.seek(pos)
    return gitpatches
1777 1777
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context is tri-state: None until we know the diff flavor, then
    # True for context diffs, False for unified diffs
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            # start of a hunk: unified '@@', context '***...', or binary
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of a new file: announce the file first
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush hunkless git patches (mode changes, pure renames, ...)
            # that precede the one matching this diff header
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any remaining hunkless git patches at end of input
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1873 1873
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c

    binchunk starts with two varint-encoded lengths (source and target
    sizes), followed by a stream of opcodes: a command byte with the
    high bit set copies (offset, size) from 'data'; a command byte in
    1..0x7f inserts that many literal bytes from the delta itself.

    Raises PatchError on the reserved opcode 0.
    """
    def deltahead(binchunk):
        # skip one varint length header: bytes with the high bit set
        # are continuation bytes; return the number of bytes consumed
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    # accumulate output pieces in a list and join once at the end;
    # repeated 'out += piece' string concatenation is quadratic
    pieces = []
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            # copy command: low bits select which offset/size bytes follow
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i]) << 16
                i += 1
            if size == 0:
                # per patch-delta.c, a zero size means 0x10000
                size = 0x10000
            offset_end = offset + size
            pieces.append(data[offset:offset_end])
        elif cmd != 0:
            # insert command: next 'cmd' bytes are literal data
            offset_end = i + cmd
            pieces.append(binchunk[i:offset_end])
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return ''.join(pieces)
1929 1929
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Apply a patch read from fp using the default patchfile class.

    Return 0 on a clean application, 1 if any hunk applied with fuzz
    and -1 if any hunk was rejected.

    With eolmode='strict' both the patch content and the patched files
    are handled in binary mode; any other mode ignores line endings
    while matching and normalizes them according to 'eolmode' after.
    """
    return _applydiff(ui, fp, patchfile, backend, store,
                      strip=strip, prefix=prefix, eolmode=eolmode)
1942 1942
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Parse the patch in fp and apply it through 'backend'.

    'patcher' is a patchfile-like factory used for each target file.
    Returns 0 on success, 1 if any hunk applied with fuzz and -1 if any
    hunk was rejected.
    """

    if prefix:
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        # strip path components and prepend the prefix
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # file could not be opened for patching; hunk is dropped
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            # finish the previous file before switching targets
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # hunkless patch: pure delete/rename/copy/mode change
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                # record the rejection and keep applying remaining files
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # snapshot copy/rename sources before they are modified
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2032 2032
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor.

    The external patch command's stdout is parsed for 'patching file',
    'with fuzz', 'saving rejects' and 'FAILED' messages; touched paths
    are added to 'files'.  Raises PatchError when the command exits
    with a non-zero status.
    """

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    # pf is the file currently being patched, printed_file records
    # whether its name was already warned about.  Both are initialized
    # here so that a fuzz/FAILED message emitted before any 'patching
    # file' line cannot raise UnboundLocalError.
    pf = None
    printed_file = False
    try:
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file and pf is not None:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file and pf is not None:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
        code = fp.close()
        if code:
            raise PatchError(_("patch command failed: %s") %
                             util.explainexit(code)[0])
    return fuzz
2074 2074
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply 'patchobj' through 'backend'; return True when fuzz occurred.

    patchobj may be a file name or an already-open binary file object.
    'files', when given, collects the paths touched by the patch.
    eolmode=None pulls the mode from the 'patch.eol' configuration.
    Raises PatchError when the patch does not apply cleanly.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        patchfp = open(patchobj, 'rb')
    except TypeError:
        # not a file name: assume an already-open file object
        patchfp = patchobj
    try:
        ret = applydiff(ui, patchfp, backend, store,
                        strip=strip, prefix=prefix, eolmode=eolmode)
    finally:
        if patchfp != patchobj:
            patchfp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2101 2101
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """Apply <patchobj> to the working directory with the builtin patcher.

    Returns whether the patch was applied with fuzz factor.
    """
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, prefix, files, eolmode)
2108 2108
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> on top of <ctx>, writing the results into <store>."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, prefix, files, eolmode)
2113 2113
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply the patch in file <patchname> to the working directory.

    'eolmode' controls end-of-line handling:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    An external patcher configured through 'ui.patch' ignores 'eolmode'.

    Returns whether the patch was applied with fuzz factor.
    """
    externalpatcher = ui.config('ui', 'patch')
    if files is None:
        files = set()
    if not externalpatcher:
        return internalpatch(ui, repo, patchname, strip, prefix, files,
                             eolmode, similarity)
    return _externalpatch(ui, repo, externalpatcher, patchname, strip,
                          files, similarity)
2135 2135
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of file paths touched by the patch at 'patchpath'.

    Renames contribute both the old and the new name.  The patch is
    only parsed, never applied.
    """
    backend = fsbackend(ui, repo.root)
    with open(patchpath, 'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    # git patches carry their own path metadata
                    gp.path = pathtransform(gp.path, strip - 1, '')[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
                else:
                    # plain patch: derive metadata from the header names
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       '')
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return changed
2156 2156
class GitDiffRequired(Exception):
    """Raised to signal that the diff must be regenerated in git format."""
2159 2159
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    """Parse every supported diff feature and return the resulting diffopts."""
    return difffeatureopts(ui, opts=opts, untrusted=untrusted,
                           section=section, git=True, whitespace=True,
                           formatchanging=True)

# historical alias kept for callers still using the old name
diffopts = diffallopts
2166 2166
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
    with most diff parsers

    Command-line values in 'opts' override the config; untrusted config
    sources are honored according to 'untrusted'.
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # resolve one option: command line first, then HGPLAIN override,
        # then the config section
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')

        # since this is in the experimental section, we need to call
        # ui.configbool directly
        buildopts['showsimilarity'] = ui.configbool('experimental',
                                                    'extendedheader.similarity')

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # default value
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                # always record a value, even after a warning
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        binary = None if opts is None else opts.get('binary')
        buildopts['nobinary'] = (not binary if binary is not None
                                 else get('nobinary', forceplain=False))
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2246 2246
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.'''
    fileiter = diffhunks(repo, node1=node1, node2=node2, match=match,
                         changes=changes, opts=opts, losedatafn=losedatafn,
                         prefix=prefix, relroot=relroot, copy=copy)
    for hdr, filehunks in fileiter:
        # flatten every hunk's lines into one text blob for this file
        pieces = []
        for hunkrange, hunklines in filehunks:
            pieces.extend(hunklines)
        text = ''.join(pieces)
        # a lone single-line header with no body is suppressed
        if hdr and (text or len(hdr) > 1):
            yield '\n'.join(hdr) + '\n'
        if text:
            yield text
2280 2280
def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # small LRU cache of filelogs keyed by file name (max 20 entries)
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
    if opts.git or opts.upgrade:
        copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as removed.
            # They are not in ctx1, so we don't want to show them in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in copy.items():
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as additions.
            del copy[dst]

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            # some change is not expressible as a plain diff: redo in git mode
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2393 2393
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()

    Splits the diff produced by func() into lines and attaches a color
    label to each one, distinguishing header lines, hunk markers,
    inserted/deleted lines, tabs and trailing whitespace.
    '''
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    # head tracks whether we are currently inside a file header block
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                # re-emit the newline consumed by split()
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        # split on tabs so they can be labeled separately
                        for token in tabsplitter.findall(stripline):
                            if '\t' == token[0]:
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                # no prefix matched: emit the line unlabeled
                yield (line, '')
            if line != stripline:
                yield (line[len(stripline):], 'diff.trailingwhitespace')
2447 2447
def diffui(*args, **kw):
    """Labelled variant of diff(): yield (output, label) pairs for ui.write()."""
    return difflabel(diff, *args, **kw)
2451 2451
2452 2452 def _filepairs(modified, added, removed, copy, opts):
2453 2453 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2454 2454 before and f2 is the the name after. For added files, f1 will be None,
2455 2455 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2456 2456 or 'rename' (the latter two only if opts.git is set).'''
2457 2457 gone = set()
2458 2458
2459 2459 copyto = dict([(v, k) for k, v in copy.items()])
2460 2460
2461 2461 addedset, removedset = set(added), set(removed)
2462 2462
2463 2463 for f in sorted(modified + added + removed):
2464 2464 copyop = None
2465 2465 f1, f2 = f, f
2466 2466 if f in addedset:
2467 2467 f1 = None
2468 2468 if f in copy:
2469 2469 if opts.git:
2470 2470 f1 = copy[f]
2471 2471 if f1 in removedset and f1 not in gone:
2472 2472 copyop = 'rename'
2473 2473 gone.add(f1)
2474 2474 else:
2475 2475 copyop = 'copy'
2476 2476 elif f in removedset:
2477 2477 f2 = None
2478 2478 if opts.git:
2479 2479 # have we already reported a copy above?
2480 2480 if (f in copyto and copyto[f] in addedset
2481 2481 and copy[copyto[f]] == f):
2482 2482 continue
2483 2483 yield f1, f2, copyop
2484 2484
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        # git blob hash of 'text' (sha1 of "blob <len>\0<content>")
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        # plain (non-git) "diff -r REV1 -r REV2 file" header line
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    def isempty(fctx):
        return fctx is None or fctx.size() == 0

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    if relroot != '' and (repo.ui.configbool('devel', 'all')
                          or repo.ui.configbool('devel', 'check-relroot')):
        # developer sanity check: every path must live under relroot
        for f in modified + added + removed + copy.keys() + copy.values():
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        fctx1 = None
        fctx2 = None
        flag1 = None
        flag2 = None
        if f1:
            fctx1 = getfilectx(f1, ctx1)
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            fctx2 = getfilectx(f2, ctx2)
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        # if binary is True, output "summary" or "base85", but not "text diff"
        binary = not opts.text and any(f.isbinary()
                                       for f in [fctx1, fctx2] if f is not None)

        if losedatafn and not opts.git:
            # let the caller decide whether losing this information is
            # acceptable or whether to upgrade to git format
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and isempty(fctx2)) or
                # empty file deletion
                (isempty(fctx1) and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else:  # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append('similarity index %d%%' % sim)
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        # fctx.is  | diffopts                | what to  | is fctx.data()
        # binary() | text nobinary git index | output?  | outputted?
        # ------------------------------------|----------------------------
        # yes      | no   no       no  *     | summary  | no
        # yes      | no   no       yes *     | base85   | yes
        # yes      | no   yes      no  *     | summary  | no
        # yes      | no   yes      yes 0     | summary  | no
        # yes      | no   yes      yes >0    | summary  | semi [1]
        # yes      | yes  *        *   *     | text diff | yes
        # no       | *    *        *   *     | text diff | yes
        # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
        if binary and (not opts.git or (opts.git and opts.nobinary and not
                                        opts.index)):
            # fast path: no binary content will be displayed, content1 and
            # content2 are only used for equivalent test. cmp() could have a
            # fast path.
            if fctx1 is not None:
                content1 = b'\0'
            if fctx2 is not None:
                if fctx1 is not None and not fctx1.cmp(fctx2):
                    content2 = b'\0' # not different
                else:
                    content2 = b'\0\0'
        else:
            # normal path: load contents
            if fctx1 is not None:
                content1 = fctx1.data()
            if fctx2 is not None:
                content2 = fctx2.data()

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
            # note: trailing comma makes this a 1-tuple of (range, lines)
            hunks = (None, [text]),
        else:
            if opts.git and opts.index > 0:
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append('index %s..%s %s' %
                              (gitindex(content1)[0:opts.index],
                               gitindex(content2)[0:opts.index],
                               gitmode[flag]))

            uheaders, hunks = mdiff.unidiff(content1, date1,
                                            content2, date2,
                                            path1, path2, opts=opts)
            header.extend(uheaders)
        yield header, hunks
2640 2640
def diffstatsum(stats):
    """Aggregate per-file diffstat tuples (filename, adds, removes, isbinary).

    Returns (widest filename display width, largest per-file change
    count, total additions, total removals, whether any file is binary).
    """
    widest = 0
    biggest = 0
    addtotal = 0
    removetotal = 0
    anybinary = False
    for fname, adds, removes, isbinary in stats:
        widest = max(widest, encoding.colwidth(fname))
        biggest = max(biggest, adds + removes)
        addtotal += adds
        removetotal += removes
        anybinary = anybinary or isbinary
    return widest, biggest, addtotal, removetotal, anybinary
2651 2651
def diffstatdata(lines):
    """Gather per-file statistics from an iterable of diff lines.

    Returns a list of (filename, adds, removes, isbinary) tuples, one
    entry per file appearing in the diff.
    """
    # raw string: '\s' in a plain string literal is an invalid escape
    # sequence (SyntaxWarning on modern Python, slated to become an error)
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        # flush the counters of the file currently being processed
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader is used to track if a line is in the
    # header portion of the diff. This helps properly account
    # for lines that start with '--' or '++'
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # starting a new file diff
            # set numbers to 0 and reset inheader
            inheader = True
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('@@'):
            # first hunk marker ends the header for this file
            inheader = False
        elif line.startswith('+') and not inheader:
            adds += 1
        elif line.startswith('-') and not inheader:
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    # flush the final file's counters
    addresult()
    return results
2690 2690
def diffstat(lines, width=80):
    """Render a textual histogram summarizing the diff in ``lines``.

    ``width`` is the total terminal width available; the +/- graph is
    scaled down when the change counts exceed the room left after the
    filename and count columns.
    """
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    # reserve at least 3 columns for the count so 'Bin' always fits
    countwidth = len(str(maxtotal))
    if hasbinary:
        countwidth = max(countwidth, 3)
    # fixed per-row overhead (' ', ' | ', spaces) eats 6 columns
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(n):
        if maxtotal <= graphwidth:
            return n
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(n * graphwidth // maxtotal, int(bool(n)))

    rows = []
    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else '%d' % (adds + removes)
        rows.append(' %s%s | %*s %s%s\n' %
                    (filename,
                     ' ' * (maxname - encoding.colwidth(filename)),
                     countwidth, count,
                     '+' * scale(adds), '-' * scale(removes)))

    if stats:
        rows.append(_(' %d files changed, %d insertions(+), '
                      '%d deletions(-)\n')
                    % (len(stats), totaladds, totalremoves))

    return ''.join(rows)
2728 2728
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for row in diffstat(*args, **kw).splitlines():
        if not row or row[-1] not in '+-':
            # header/summary rows carry no color label
            yield (row, '')
        else:
            # split the trailing +/- graph off the name-and-count part
            name, graph = row.rsplit(' ', 1)
            yield (name + ' ', '')
            inserted = re.search(br'\++', graph)
            if inserted:
                yield (inserted.group(0), 'diffstat.inserted')
            deleted = re.search(br'-+', graph)
            if deleted:
                yield (deleted.group(0), 'diffstat.deleted')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now