py3: fix doctests in patch.py to be compatible with Python 3...
Yuya Nishihara
r34254:5ce32fe7 default
# patch.py - patch file parsing routines
#
# Copyright 2006 Brendan Cully <brendan@kublai.com>
# Copyright 2007 Chris Mason <chris.mason@oracle.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import, print_function

import collections
import copy
import email
import errno
import hashlib
import os
import posixpath
import re
import shutil
import tempfile
import zlib

from .i18n import _
from .node import (
    hex,
    short,
)
from . import (
    copies,
    encoding,
    error,
    mail,
    mdiff,
    pathutil,
    policy,
    pycompat,
    scmutil,
    similar,
    util,
    vfs as vfsmod,
)

diffhelpers = policy.importmod(r'diffhelpers')
stringio = util.stringio

gitre = re.compile(br'diff --git a/(.*) b/(.*)')
tabsplitter = re.compile(br'(\t+|[^\t]+)')

PatchError = error.PatchError

# public functions

def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        def msgfp(m):
            fp = stringio()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        yield chunk(cur)

    class fiter(object):
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)

## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]

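# Illustrative sketch (editor's addition, not part of the original patch.py):
# a hypothetical doctest-style example of split() on a stream holding two
# "hg export"-style patches, each with a header block ending in a blank line.
# The sample text and expected output below are assumptions for illustration.
#
#   >>> data = ('# HG changeset patch\n# User alice\n\n'
#   ...         'diff --git a/f b/f\n'
#   ...         '# HG changeset patch\n# User bob\n\n'
#   ...         'diff --git a/g b/g\n')
#   >>> patches = list(split(stringio(data)))
#   >>> len(patches)
#   2
#   >>> patches[0].read().splitlines()[1]
#   '# User alice'
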
def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
    - filename,
    - message,
    - user,
    - date,
    - branch,
    - node,
    - p1,
    - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        br'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        br'---[ \t].*?^\+\+\+[ \t]|'
                        br'\*\*\*[ \t].*?^---[ \t])',
                        re.MULTILINE | re.DOTALL)

    data = {}
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject'] and mail.headdecode(msg['Subject'])
        data['user'] = msg['From'] and mail.headdecode(msg['From'])
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(br'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = stringio()
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            hgpatchheader = False
                    elif line == '---':
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        os.unlink(tmpname)
    return data

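# Illustrative sketch (editor's addition, not part of the original patch.py):
# the ownership convention documented in extract()'s docstring -- the caller
# must unlink the temporary file returned under 'filename'. 'ui' and 'fp'
# below are assumed to be a ui object and an open patch/email file object.
#
#   data = extract(ui, fp)
#   try:
#       if 'filename' in data:
#           pass  # hand data['filename'] to a patching backend here
#   finally:
#       if data.get('filename'):
#           os.unlink(data['filename'])
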
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        islink = mode & 0o20000
        isexec = mode & 0o100
        self.mode = (islink, isexec)

    def copy(self):
        other = patchmeta(self.path)
        other.oldpath = self.oldpath
        other.mode = self.mode
        other.op = self.op
        other.binary = self.binary
        return other

    def _ispatchinga(self, afile):
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)

def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches

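# Illustrative sketch (editor's addition, not part of the original patch.py):
# readgitpatch() only needs an iterable of lines, so a plain list is enough
# for a quick, hypothetical example of the metadata it extracts from a rename.
#
#   >>> lines = ['diff --git a/old b/new\n',
#   ...          'rename from old\n',
#   ...          'rename to new\n']
#   >>> [(gp.op, gp.oldpath, gp.path) for gp in readgitpatch(lines)]
#   [('RENAME', 'old', 'new')]
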
class linereader(object):
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            l = self.buf[0]
            del self.buf[0]
            return l
        return self.fp.readline()

    def __iter__(self):
        return iter(self.readline, '')

class abstractbackend(object):
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. total is the number of hunks
        which failed to apply and total the total number of hunks for this
        files.
        """
        pass

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError

class fsbackend(abstractbackend):
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        if self.opener.islink(fname):
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)

class workingbackend(fsbackend):
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)

class filestore(object):
    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        self.created = 0
        self.maxsize = maxsize
        if self.maxsize is None:
            self.maxsize = 4*(2**20)
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = tempfile.mkdtemp(prefix='hg-patch-')
                self.opener = vfsmod.vfs(root)
            # Avoid filename issues with these simple names
            fn = str(self.created)
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        if self.opener:
            shutil.rmtree(self.opener.base)

class repobackend(abstractbackend):
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        return self.changed | self.removed

# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
eolmodes = ['strict', 'crlf', 'lf', 'auto']

class patchfile(object):
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
            self.ui.warn(_("(use '--prefix' to apply patch relative to the "
                           "current directory)\n"))

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1:] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                         (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)

class header(object):
    """patch header
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        return any(h.startswith('index ') for h in self.header)

    def pretty(self, fp):
        for h in self.header:
            if h.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if h.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum([max(h.added, h.removed) for h in self.hunks])))
                break
            fp.write(h)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        return any(self.allhunks_re.match(h) for h in self.header)

    def files(self):
        match = self.diffgit_re.match(self.header[0])
        if match:
            fromfile, tofile = match.groups()
            if fromfile == tofile:
                return [fromfile]
            return [fromfile, tofile]
        else:
            return self.diff_re.match(self.header[0]).groups()

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        return any(self.newfile_re.match(h) for h in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation, in the case of
        # the deleted file he has to take the deletion or not take it, he
        # cannot take some of it.
        # Newly added files are special if they are empty, they are not special
        # if they have some content as we want to be able to change it
        nocontent = len(self.header) == 2
        emptynewfile = self.isnewfile() and nocontent
        return emptynewfile or \
                any(self.special_re.match(h) for h in self.header)

class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """

    def __init__(self, header, fromline, toline, proc, before, hunk, after,
                 maxcontext=None):
        def trimcontext(lines, reverse=False):
            if maxcontext is not None:
                delta = len(lines) - maxcontext
                if delta > 0:
                    if reverse:
                        return delta, lines[delta:]
                    else:
                        return delta, lines[:maxcontext]
            return 0, lines

        self.header = header
        trimedbefore, self.before = trimcontext(before, True)
        self.fromline = fromline + trimedbefore
        self.toline = toline + trimedbefore
        _trimedafter, self.after = trimcontext(after, False)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False

        return ((v.hunk == self.hunk) and
                (v.proc == self.proc) and
                (self.fromline == v.fromline) and
                (self.header.files() == v.header.files()))

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = len([h for h in hunk if h.startswith('+')])
        rem = len([h for h in hunk if h.startswith('-')])
        return add, rem

    def reversehunk(self):
        """return another recordhunk which is the reverse of the hunk

        If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
        that, swap fromline/toline and +/- signs while keep other things
        unchanged.
        """
        m = {'+': '-', '-': '+', '\\': '\\'}
        hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
        return recordhunk(self.header, self.toline, self.fromline, self.proc,
                          self.before, hunk, self.after)

    def write(self, fp):
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)

998 messages = {
998 messages = {
999 'multiple': {
999 'multiple': {
1000 'discard': _("discard change %d/%d to '%s'?"),
1000 'discard': _("discard change %d/%d to '%s'?"),
1001 'record': _("record change %d/%d to '%s'?"),
1001 'record': _("record change %d/%d to '%s'?"),
1002 'revert': _("revert change %d/%d to '%s'?"),
1002 'revert': _("revert change %d/%d to '%s'?"),
1003 },
1003 },
1004 'single': {
1004 'single': {
1005 'discard': _("discard this change to '%s'?"),
1005 'discard': _("discard this change to '%s'?"),
1006 'record': _("record this change to '%s'?"),
1006 'record': _("record this change to '%s'?"),
1007 'revert': _("revert this change to '%s'?"),
1007 'revert': _("revert this change to '%s'?"),
1008 },
1008 },
1009 'help': {
1009 'help': {
1010 'discard': _('[Ynesfdaq?]'
1010 'discard': _('[Ynesfdaq?]'
1011 '$$ &Yes, discard this change'
1011 '$$ &Yes, discard this change'
1012 '$$ &No, skip this change'
1012 '$$ &No, skip this change'
1013 '$$ &Edit this change manually'
1013 '$$ &Edit this change manually'
1014 '$$ &Skip remaining changes to this file'
1014 '$$ &Skip remaining changes to this file'
1015 '$$ Discard remaining changes to this &file'
1015 '$$ Discard remaining changes to this &file'
1016 '$$ &Done, skip remaining changes and files'
1016 '$$ &Done, skip remaining changes and files'
1017 '$$ Discard &all changes to all remaining files'
1017 '$$ Discard &all changes to all remaining files'
1018 '$$ &Quit, discarding no changes'
1018 '$$ &Quit, discarding no changes'
1019 '$$ &? (display help)'),
1019 '$$ &? (display help)'),
1020 'record': _('[Ynesfdaq?]'
1020 'record': _('[Ynesfdaq?]'
1021 '$$ &Yes, record this change'
1021 '$$ &Yes, record this change'
1022 '$$ &No, skip this change'
1022 '$$ &No, skip this change'
1023 '$$ &Edit this change manually'
1023 '$$ &Edit this change manually'
1024 '$$ &Skip remaining changes to this file'
1024 '$$ &Skip remaining changes to this file'
1025 '$$ Record remaining changes to this &file'
1025 '$$ Record remaining changes to this &file'
1026 '$$ &Done, skip remaining changes and files'
1026 '$$ &Done, skip remaining changes and files'
1027 '$$ Record &all changes to all remaining files'
1027 '$$ Record &all changes to all remaining files'
1028 '$$ &Quit, recording no changes'
1028 '$$ &Quit, recording no changes'
1029 '$$ &? (display help)'),
1029 '$$ &? (display help)'),
1030 'revert': _('[Ynesfdaq?]'
1030 'revert': _('[Ynesfdaq?]'
1031 '$$ &Yes, revert this change'
1031 '$$ &Yes, revert this change'
1032 '$$ &No, skip this change'
1032 '$$ &No, skip this change'
1033 '$$ &Edit this change manually'
1033 '$$ &Edit this change manually'
1034 '$$ &Skip remaining changes to this file'
1034 '$$ &Skip remaining changes to this file'
1035 '$$ Revert remaining changes to this &file'
1035 '$$ Revert remaining changes to this &file'
1036 '$$ &Done, skip remaining changes and files'
1036 '$$ &Done, skip remaining changes and files'
1037 '$$ Revert &all changes to all remaining files'
1037 '$$ Revert &all changes to all remaining files'
1038 '$$ &Quit, reverting no changes'
1038 '$$ &Quit, reverting no changes'
1039 '$$ &? (display help)')
1039 '$$ &? (display help)')
1040 }
1040 }
1041 }
1041 }
1042
1042
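# Note on the strings above: they follow ui.promptchoice()'s convention, in
# which the prompt and its choices are separated by '$$' and the character
# marked with '&' in each choice is the key the user types; ui.extractchoices()
# (used in the prompt loop below) splits them back apart to build the help
# listing. A minimal prompt in this format would look like
#
#   "apply change? $$ &Yes $$ &No"
#
# and would accept 'y' or 'n'.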
1043 def filterpatch(ui, headers, operation=None):
1043 def filterpatch(ui, headers, operation=None):
1044 """Interactively filter patch chunks into applied-only chunks"""
1044 """Interactively filter patch chunks into applied-only chunks"""
1045 if operation is None:
1045 if operation is None:
1046 operation = 'record'
1046 operation = 'record'
1047
1047
1048 def prompt(skipfile, skipall, query, chunk):
1048 def prompt(skipfile, skipall, query, chunk):
1049 """prompt query, and process base inputs
1049 """prompt query, and process base inputs
1050
1050
1051 - s/f: answer no/yes for the rest of the current file
1051 - s/f: answer no/yes for the rest of the current file
1052 - d/a: answer no/yes for all remaining changes
1052 - d/a: answer no/yes for all remaining changes
1053 - ? (help)
1053 - ? (help)
1054 - q (quit)
1054 - q (quit)
1055
1055
1056 Return True/False and possibly updated skipfile and skipall.
1056 Return True/False and possibly updated skipfile and skipall.
1057 """
1057 """
1058 newpatches = None
1058 newpatches = None
1059 if skipall is not None:
1059 if skipall is not None:
1060 return skipall, skipfile, skipall, newpatches
1060 return skipall, skipfile, skipall, newpatches
1061 if skipfile is not None:
1061 if skipfile is not None:
1062 return skipfile, skipfile, skipall, newpatches
1062 return skipfile, skipfile, skipall, newpatches
1063 while True:
1063 while True:
1064 resps = messages['help'][operation]
1064 resps = messages['help'][operation]
1065 r = ui.promptchoice("%s %s" % (query, resps))
1065 r = ui.promptchoice("%s %s" % (query, resps))
1066 ui.write("\n")
1066 ui.write("\n")
1067 if r == 8: # ?
1067 if r == 8: # ?
1068 for c, t in ui.extractchoices(resps)[1]:
1068 for c, t in ui.extractchoices(resps)[1]:
1069 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1069 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1070 continue
1070 continue
1071 elif r == 0: # yes
1071 elif r == 0: # yes
1072 ret = True
1072 ret = True
1073 elif r == 1: # no
1073 elif r == 1: # no
1074 ret = False
1074 ret = False
1075 elif r == 2: # Edit patch
1075 elif r == 2: # Edit patch
1076 if chunk is None:
1076 if chunk is None:
1077 ui.write(_('cannot edit patch for whole file'))
1077 ui.write(_('cannot edit patch for whole file'))
1078 ui.write("\n")
1078 ui.write("\n")
1079 continue
1079 continue
1080 if chunk.header.binary():
1080 if chunk.header.binary():
1081 ui.write(_('cannot edit patch for binary file'))
1081 ui.write(_('cannot edit patch for binary file'))
1082 ui.write("\n")
1082 ui.write("\n")
1083 continue
1083 continue
1084 # Patch comment based on the Git one (based on comment at end of
1084 # Patch comment based on the Git one (based on comment at end of
1085 # https://mercurial-scm.org/wiki/RecordExtension)
1085 # https://mercurial-scm.org/wiki/RecordExtension)
1086 phelp = '---' + _("""
1086 phelp = '---' + _("""
1087 To remove '-' lines, make them ' ' lines (context).
1087 To remove '-' lines, make them ' ' lines (context).
1088 To remove '+' lines, delete them.
1088 To remove '+' lines, delete them.
1089 Lines starting with # will be removed from the patch.
1089 Lines starting with # will be removed from the patch.
1090
1090
1091 If the patch applies cleanly, the edited hunk will immediately be
1091 If the patch applies cleanly, the edited hunk will immediately be
1092 added to the record list. If it does not apply cleanly, a rejects
1092 added to the record list. If it does not apply cleanly, a rejects
1093 file will be generated: you can use that when you try again. If
1093 file will be generated: you can use that when you try again. If
1094 all lines of the hunk are removed, then the edit is aborted and
1094 all lines of the hunk are removed, then the edit is aborted and
1095 the hunk is left unchanged.
1095 the hunk is left unchanged.
1096 """)
1096 """)
1097 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1097 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1098 suffix=".diff", text=True)
1098 suffix=".diff", text=True)
1099 ncpatchfp = None
1099 ncpatchfp = None
1100 try:
1100 try:
1101 # Write the initial patch
1101 # Write the initial patch
1102 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1102 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1103 chunk.header.write(f)
1103 chunk.header.write(f)
1104 chunk.write(f)
1104 chunk.write(f)
1105 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1105 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1106 f.close()
1106 f.close()
1107 # Start the editor and wait for it to complete
1107 # Start the editor and wait for it to complete
1108 editor = ui.geteditor()
1108 editor = ui.geteditor()
1109 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1109 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1110 environ={'HGUSER': ui.username()},
1110 environ={'HGUSER': ui.username()},
1111 blockedtag='filterpatch')
1111 blockedtag='filterpatch')
1112 if ret != 0:
1112 if ret != 0:
1113 ui.warn(_("editor exited with exit code %d\n") % ret)
1113 ui.warn(_("editor exited with exit code %d\n") % ret)
1114 continue
1114 continue
1115 # Remove comment lines
1115 # Remove comment lines
1116 patchfp = open(patchfn)
1116 patchfp = open(patchfn)
1117 ncpatchfp = stringio()
1117 ncpatchfp = stringio()
1118 for line in util.iterfile(patchfp):
1118 for line in util.iterfile(patchfp):
1119 if not line.startswith('#'):
1119 if not line.startswith('#'):
1120 ncpatchfp.write(line)
1120 ncpatchfp.write(line)
1121 patchfp.close()
1121 patchfp.close()
1122 ncpatchfp.seek(0)
1122 ncpatchfp.seek(0)
1123 newpatches = parsepatch(ncpatchfp)
1123 newpatches = parsepatch(ncpatchfp)
1124 finally:
1124 finally:
1125 os.unlink(patchfn)
1125 os.unlink(patchfn)
1126 del ncpatchfp
1126 del ncpatchfp
1127 # Signal that the chunk shouldn't be applied as-is, but
1127 # Signal that the chunk shouldn't be applied as-is, but
1128 # provide the new patch to be used instead.
1128 # provide the new patch to be used instead.
1129 ret = False
1129 ret = False
1130 elif r == 3: # Skip
1130 elif r == 3: # Skip
1131 ret = skipfile = False
1131 ret = skipfile = False
1132 elif r == 4: # file (Record remaining)
1132 elif r == 4: # file (Record remaining)
1133 ret = skipfile = True
1133 ret = skipfile = True
1134 elif r == 5: # done, skip remaining
1134 elif r == 5: # done, skip remaining
1135 ret = skipall = False
1135 ret = skipall = False
1136 elif r == 6: # all
1136 elif r == 6: # all
1137 ret = skipall = True
1137 ret = skipall = True
1138 elif r == 7: # quit
1138 elif r == 7: # quit
1139 raise error.Abort(_('user quit'))
1139 raise error.Abort(_('user quit'))
1140 return ret, skipfile, skipall, newpatches
1140 return ret, skipfile, skipall, newpatches
1141
1141
1142 seen = set()
1142 seen = set()
1143 applied = {} # 'filename' -> [] of chunks
1143 applied = {} # 'filename' -> [] of chunks
1144 skipfile, skipall = None, None
1144 skipfile, skipall = None, None
1145 pos, total = 1, sum(len(h.hunks) for h in headers)
1145 pos, total = 1, sum(len(h.hunks) for h in headers)
1146 for h in headers:
1146 for h in headers:
1147 pos += len(h.hunks)
1147 pos += len(h.hunks)
1148 skipfile = None
1148 skipfile = None
1149 fixoffset = 0
1149 fixoffset = 0
1150 hdr = ''.join(h.header)
1150 hdr = ''.join(h.header)
1151 if hdr in seen:
1151 if hdr in seen:
1152 continue
1152 continue
1153 seen.add(hdr)
1153 seen.add(hdr)
1154 if skipall is None:
1154 if skipall is None:
1155 h.pretty(ui)
1155 h.pretty(ui)
1156 msg = (_('examine changes to %s?') %
1156 msg = (_('examine changes to %s?') %
1157 _(' and ').join("'%s'" % f for f in h.files()))
1157 _(' and ').join("'%s'" % f for f in h.files()))
1158 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1158 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1159 if not r:
1159 if not r:
1160 continue
1160 continue
1161 applied[h.filename()] = [h]
1161 applied[h.filename()] = [h]
1162 if h.allhunks():
1162 if h.allhunks():
1163 applied[h.filename()] += h.hunks
1163 applied[h.filename()] += h.hunks
1164 continue
1164 continue
1165 for i, chunk in enumerate(h.hunks):
1165 for i, chunk in enumerate(h.hunks):
1166 if skipfile is None and skipall is None:
1166 if skipfile is None and skipall is None:
1167 chunk.pretty(ui)
1167 chunk.pretty(ui)
1168 if total == 1:
1168 if total == 1:
1169 msg = messages['single'][operation] % chunk.filename()
1169 msg = messages['single'][operation] % chunk.filename()
1170 else:
1170 else:
1171 idx = pos - len(h.hunks) + i
1171 idx = pos - len(h.hunks) + i
1172 msg = messages['multiple'][operation] % (idx, total,
1172 msg = messages['multiple'][operation] % (idx, total,
1173 chunk.filename())
1173 chunk.filename())
1174 r, skipfile, skipall, newpatches = prompt(skipfile,
1174 r, skipfile, skipall, newpatches = prompt(skipfile,
1175 skipall, msg, chunk)
1175 skipall, msg, chunk)
1176 if r:
1176 if r:
1177 if fixoffset:
1177 if fixoffset:
1178 chunk = copy.copy(chunk)
1178 chunk = copy.copy(chunk)
1179 chunk.toline += fixoffset
1179 chunk.toline += fixoffset
1180 applied[chunk.filename()].append(chunk)
1180 applied[chunk.filename()].append(chunk)
1181 elif newpatches is not None:
1181 elif newpatches is not None:
1182 for newpatch in newpatches:
1182 for newpatch in newpatches:
1183 for newhunk in newpatch.hunks:
1183 for newhunk in newpatch.hunks:
1184 if fixoffset:
1184 if fixoffset:
1185 newhunk.toline += fixoffset
1185 newhunk.toline += fixoffset
1186 applied[newhunk.filename()].append(newhunk)
1186 applied[newhunk.filename()].append(newhunk)
1187 else:
1187 else:
1188 fixoffset += chunk.removed - chunk.added
1188 fixoffset += chunk.removed - chunk.added
1189 return (sum([h for h in applied.itervalues()
1189 return (sum([h for h in applied.itervalues()
1190 if h[0].special() or len(h) > 1], []), {})
1190 if h[0].special() or len(h) > 1], []), {})
1191 class hunk(object):
1191 class hunk(object):
1192 def __init__(self, desc, num, lr, context):
1192 def __init__(self, desc, num, lr, context):
1193 self.number = num
1193 self.number = num
1194 self.desc = desc
1194 self.desc = desc
1195 self.hunk = [desc]
1195 self.hunk = [desc]
1196 self.a = []
1196 self.a = []
1197 self.b = []
1197 self.b = []
1198 self.starta = self.lena = None
1198 self.starta = self.lena = None
1199 self.startb = self.lenb = None
1199 self.startb = self.lenb = None
1200 if lr is not None:
1200 if lr is not None:
1201 if context:
1201 if context:
1202 self.read_context_hunk(lr)
1202 self.read_context_hunk(lr)
1203 else:
1203 else:
1204 self.read_unified_hunk(lr)
1204 self.read_unified_hunk(lr)
1205
1205
1206 def getnormalized(self):
1206 def getnormalized(self):
1207 """Return a copy with line endings normalized to LF."""
1207 """Return a copy with line endings normalized to LF."""
1208
1208
1209 def normalize(lines):
1209 def normalize(lines):
1210 nlines = []
1210 nlines = []
1211 for line in lines:
1211 for line in lines:
1212 if line.endswith('\r\n'):
1212 if line.endswith('\r\n'):
1213 line = line[:-2] + '\n'
1213 line = line[:-2] + '\n'
1214 nlines.append(line)
1214 nlines.append(line)
1215 return nlines
1215 return nlines
1216
1216
1217 # Dummy object, it is rebuilt manually
1217 # Dummy object, it is rebuilt manually
1218 nh = hunk(self.desc, self.number, None, None)
1218 nh = hunk(self.desc, self.number, None, None)
1219 nh.number = self.number
1219 nh.number = self.number
1220 nh.desc = self.desc
1220 nh.desc = self.desc
1221 nh.hunk = self.hunk
1221 nh.hunk = self.hunk
1222 nh.a = normalize(self.a)
1222 nh.a = normalize(self.a)
1223 nh.b = normalize(self.b)
1223 nh.b = normalize(self.b)
1224 nh.starta = self.starta
1224 nh.starta = self.starta
1225 nh.startb = self.startb
1225 nh.startb = self.startb
1226 nh.lena = self.lena
1226 nh.lena = self.lena
1227 nh.lenb = self.lenb
1227 nh.lenb = self.lenb
1228 return nh
1228 return nh
1229
1229
1230 def read_unified_hunk(self, lr):
1230 def read_unified_hunk(self, lr):
1231 m = unidesc.match(self.desc)
1231 m = unidesc.match(self.desc)
1232 if not m:
1232 if not m:
1233 raise PatchError(_("bad hunk #%d") % self.number)
1233 raise PatchError(_("bad hunk #%d") % self.number)
1234 self.starta, self.lena, self.startb, self.lenb = m.groups()
1234 self.starta, self.lena, self.startb, self.lenb = m.groups()
1235 if self.lena is None:
1235 if self.lena is None:
1236 self.lena = 1
1236 self.lena = 1
1237 else:
1237 else:
1238 self.lena = int(self.lena)
1238 self.lena = int(self.lena)
1239 if self.lenb is None:
1239 if self.lenb is None:
1240 self.lenb = 1
1240 self.lenb = 1
1241 else:
1241 else:
1242 self.lenb = int(self.lenb)
1242 self.lenb = int(self.lenb)
1243 self.starta = int(self.starta)
1243 self.starta = int(self.starta)
1244 self.startb = int(self.startb)
1244 self.startb = int(self.startb)
1245 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1245 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1246 self.b)
1246 self.b)
1247 # if we hit eof before finishing out the hunk, the last line will
1247 # if we hit eof before finishing out the hunk, the last line will
1248 # be zero length. Let's try to fix it up.
1248 # be zero length. Let's try to fix it up.
1249 while len(self.hunk[-1]) == 0:
1249 while len(self.hunk[-1]) == 0:
1250 del self.hunk[-1]
1250 del self.hunk[-1]
1251 del self.a[-1]
1251 del self.a[-1]
1252 del self.b[-1]
1252 del self.b[-1]
1253 self.lena -= 1
1253 self.lena -= 1
1254 self.lenb -= 1
1254 self.lenb -= 1
1255 self._fixnewline(lr)
1255 self._fixnewline(lr)
1256
1256
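# Illustrative note: unidesc matches unified hunk headers such as
# "@@ -1,7 +1,8 @@", giving starta=1, lena=7, startb=1, lenb=8. A header like
# "@@ -5 +5 @@" leaves the lengths unset, which the code above defaults to 1.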
1257 def read_context_hunk(self, lr):
1257 def read_context_hunk(self, lr):
1258 self.desc = lr.readline()
1258 self.desc = lr.readline()
1259 m = contextdesc.match(self.desc)
1259 m = contextdesc.match(self.desc)
1260 if not m:
1260 if not m:
1261 raise PatchError(_("bad hunk #%d") % self.number)
1261 raise PatchError(_("bad hunk #%d") % self.number)
1262 self.starta, aend = m.groups()
1262 self.starta, aend = m.groups()
1263 self.starta = int(self.starta)
1263 self.starta = int(self.starta)
1264 if aend is None:
1264 if aend is None:
1265 aend = self.starta
1265 aend = self.starta
1266 self.lena = int(aend) - self.starta
1266 self.lena = int(aend) - self.starta
1267 if self.starta:
1267 if self.starta:
1268 self.lena += 1
1268 self.lena += 1
1269 for x in xrange(self.lena):
1269 for x in xrange(self.lena):
1270 l = lr.readline()
1270 l = lr.readline()
1271 if l.startswith('---'):
1271 if l.startswith('---'):
1272 # lines addition, old block is empty
1272 # lines addition, old block is empty
1273 lr.push(l)
1273 lr.push(l)
1274 break
1274 break
1275 s = l[2:]
1275 s = l[2:]
1276 if l.startswith('- ') or l.startswith('! '):
1276 if l.startswith('- ') or l.startswith('! '):
1277 u = '-' + s
1277 u = '-' + s
1278 elif l.startswith(' '):
1278 elif l.startswith(' '):
1279 u = ' ' + s
1279 u = ' ' + s
1280 else:
1280 else:
1281 raise PatchError(_("bad hunk #%d old text line %d") %
1281 raise PatchError(_("bad hunk #%d old text line %d") %
1282 (self.number, x))
1282 (self.number, x))
1283 self.a.append(u)
1283 self.a.append(u)
1284 self.hunk.append(u)
1284 self.hunk.append(u)
1285
1285
1286 l = lr.readline()
1286 l = lr.readline()
1287 if l.startswith('\ '):
1287 if l.startswith('\ '):
1288 s = self.a[-1][:-1]
1288 s = self.a[-1][:-1]
1289 self.a[-1] = s
1289 self.a[-1] = s
1290 self.hunk[-1] = s
1290 self.hunk[-1] = s
1291 l = lr.readline()
1291 l = lr.readline()
1292 m = contextdesc.match(l)
1292 m = contextdesc.match(l)
1293 if not m:
1293 if not m:
1294 raise PatchError(_("bad hunk #%d") % self.number)
1294 raise PatchError(_("bad hunk #%d") % self.number)
1295 self.startb, bend = m.groups()
1295 self.startb, bend = m.groups()
1296 self.startb = int(self.startb)
1296 self.startb = int(self.startb)
1297 if bend is None:
1297 if bend is None:
1298 bend = self.startb
1298 bend = self.startb
1299 self.lenb = int(bend) - self.startb
1299 self.lenb = int(bend) - self.startb
1300 if self.startb:
1300 if self.startb:
1301 self.lenb += 1
1301 self.lenb += 1
1302 hunki = 1
1302 hunki = 1
1303 for x in xrange(self.lenb):
1303 for x in xrange(self.lenb):
1304 l = lr.readline()
1304 l = lr.readline()
1305 if l.startswith('\ '):
1305 if l.startswith('\ '):
1306 # XXX: the only way to hit this is with an invalid line range.
1306 # XXX: the only way to hit this is with an invalid line range.
1307 # The no-eol marker is not counted in the line range, but I
1307 # The no-eol marker is not counted in the line range, but I
1308 # guess there are diff(1) implementations out there that behave differently.
1308 # guess there are diff(1) implementations out there that behave differently.
1309 s = self.b[-1][:-1]
1309 s = self.b[-1][:-1]
1310 self.b[-1] = s
1310 self.b[-1] = s
1311 self.hunk[hunki - 1] = s
1311 self.hunk[hunki - 1] = s
1312 continue
1312 continue
1313 if not l:
1313 if not l:
1314 # line deletions, new block is empty and we hit EOF
1314 # line deletions, new block is empty and we hit EOF
1315 lr.push(l)
1315 lr.push(l)
1316 break
1316 break
1317 s = l[2:]
1317 s = l[2:]
1318 if l.startswith('+ ') or l.startswith('! '):
1318 if l.startswith('+ ') or l.startswith('! '):
1319 u = '+' + s
1319 u = '+' + s
1320 elif l.startswith(' '):
1320 elif l.startswith(' '):
1321 u = ' ' + s
1321 u = ' ' + s
1322 elif len(self.b) == 0:
1322 elif len(self.b) == 0:
1323 # line deletions, new block is empty
1323 # line deletions, new block is empty
1324 lr.push(l)
1324 lr.push(l)
1325 break
1325 break
1326 else:
1326 else:
1327 raise PatchError(_("bad hunk #%d old text line %d") %
1327 raise PatchError(_("bad hunk #%d old text line %d") %
1328 (self.number, x))
1328 (self.number, x))
1329 self.b.append(s)
1329 self.b.append(s)
1330 while True:
1330 while True:
1331 if hunki >= len(self.hunk):
1331 if hunki >= len(self.hunk):
1332 h = ""
1332 h = ""
1333 else:
1333 else:
1334 h = self.hunk[hunki]
1334 h = self.hunk[hunki]
1335 hunki += 1
1335 hunki += 1
1336 if h == u:
1336 if h == u:
1337 break
1337 break
1338 elif h.startswith('-'):
1338 elif h.startswith('-'):
1339 continue
1339 continue
1340 else:
1340 else:
1341 self.hunk.insert(hunki - 1, u)
1341 self.hunk.insert(hunki - 1, u)
1342 break
1342 break
1343
1343
1344 if not self.a:
1344 if not self.a:
1345 # this happens when lines were only added to the hunk
1345 # this happens when lines were only added to the hunk
1346 for x in self.hunk:
1346 for x in self.hunk:
1347 if x.startswith('-') or x.startswith(' '):
1347 if x.startswith('-') or x.startswith(' '):
1348 self.a.append(x)
1348 self.a.append(x)
1349 if not self.b:
1349 if not self.b:
1350 # this happens when lines were only deleted from the hunk
1350 # this happens when lines were only deleted from the hunk
1351 for x in self.hunk:
1351 for x in self.hunk:
1352 if x.startswith('+') or x.startswith(' '):
1352 if x.startswith('+') or x.startswith(' '):
1353 self.b.append(x[1:])
1353 self.b.append(x[1:])
1354 # @@ -start,len +start,len @@
1354 # @@ -start,len +start,len @@
1355 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1355 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1356 self.startb, self.lenb)
1356 self.startb, self.lenb)
1357 self.hunk[0] = self.desc
1357 self.hunk[0] = self.desc
1358 self._fixnewline(lr)
1358 self._fixnewline(lr)
1359
1359
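# Illustrative sketch of the input format: read_context_hunk() consumes
# old-style context-diff hunks, which look roughly like
#
#   ***************
#   *** 1,4 ****
#     unchanged
#   - removed
#   ! changed (old)
#     unchanged
#   --- 1,4 ----
#     unchanged
#   ! changed (new)
#   + added
#     unchanged
#
# and rewrites them into the unified "@@ -a,b +c,d @@" form stored in
# self.hunk, so the rest of the code only has to deal with unified hunks.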
1360 def _fixnewline(self, lr):
1360 def _fixnewline(self, lr):
1361 l = lr.readline()
1361 l = lr.readline()
1362 if l.startswith('\ '):
1362 if l.startswith('\ '):
1363 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1363 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1364 else:
1364 else:
1365 lr.push(l)
1365 lr.push(l)
1366
1366
1367 def complete(self):
1367 def complete(self):
1368 return len(self.a) == self.lena and len(self.b) == self.lenb
1368 return len(self.a) == self.lena and len(self.b) == self.lenb
1369
1369
1370 def _fuzzit(self, old, new, fuzz, toponly):
1370 def _fuzzit(self, old, new, fuzz, toponly):
1371 # this removes up to 'fuzz' context lines from the top and bottom of the
1371 # this removes up to 'fuzz' context lines from the top and bottom of the
1372 # 'old' and 'new' line lists. It checks the hunk to make sure only context
1372 # 'old' and 'new' line lists. It checks the hunk to make sure only context
1373 # lines are removed, and then returns the trimmed lists and the top offset.
1373 # lines are removed, and then returns the trimmed lists and the top offset.
1374 fuzz = min(fuzz, len(old))
1374 fuzz = min(fuzz, len(old))
1375 if fuzz:
1375 if fuzz:
1376 top = 0
1376 top = 0
1377 bot = 0
1377 bot = 0
1378 hlen = len(self.hunk)
1378 hlen = len(self.hunk)
1379 for x in xrange(hlen - 1):
1379 for x in xrange(hlen - 1):
1380 # the hunk starts with the @@ line, so use x+1
1380 # the hunk starts with the @@ line, so use x+1
1381 if self.hunk[x + 1][0] == ' ':
1381 if self.hunk[x + 1][0] == ' ':
1382 top += 1
1382 top += 1
1383 else:
1383 else:
1384 break
1384 break
1385 if not toponly:
1385 if not toponly:
1386 for x in xrange(hlen - 1):
1386 for x in xrange(hlen - 1):
1387 if self.hunk[hlen - bot - 1][0] == ' ':
1387 if self.hunk[hlen - bot - 1][0] == ' ':
1388 bot += 1
1388 bot += 1
1389 else:
1389 else:
1390 break
1390 break
1391
1391
1392 bot = min(fuzz, bot)
1392 bot = min(fuzz, bot)
1393 top = min(fuzz, top)
1393 top = min(fuzz, top)
1394 return old[top:len(old) - bot], new[top:len(new) - bot], top
1394 return old[top:len(old) - bot], new[top:len(new) - bot], top
1395 return old, new, 0
1395 return old, new, 0
1396
1396
1397 def fuzzit(self, fuzz, toponly):
1397 def fuzzit(self, fuzz, toponly):
1398 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1398 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1399 oldstart = self.starta + top
1399 oldstart = self.starta + top
1400 newstart = self.startb + top
1400 newstart = self.startb + top
1401 # zero length hunk ranges already have their start decremented
1401 # zero length hunk ranges already have their start decremented
1402 if self.lena and oldstart > 0:
1402 if self.lena and oldstart > 0:
1403 oldstart -= 1
1403 oldstart -= 1
1404 if self.lenb and newstart > 0:
1404 if self.lenb and newstart > 0:
1405 newstart -= 1
1405 newstart -= 1
1406 return old, oldstart, new, newstart
1406 return old, oldstart, new, newstart
1407
1407
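# Worked example (hypothetical numbers): for a hunk with two leading and two
# trailing context lines, a call like fuzzit(1, False) has _fuzzit() find
# top=2 and bot=2, clamp both to the fuzz value of 1, and hand back the
# old/new lists with one context line shaved off each end together with
# top=1; fuzzit() then uses that offset to adjust oldstart and newstart.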
1408 class binhunk(object):
1408 class binhunk(object):
1409 'A binary patch file.'
1409 'A binary patch file.'
1410 def __init__(self, lr, fname):
1410 def __init__(self, lr, fname):
1411 self.text = None
1411 self.text = None
1412 self.delta = False
1412 self.delta = False
1413 self.hunk = ['GIT binary patch\n']
1413 self.hunk = ['GIT binary patch\n']
1414 self._fname = fname
1414 self._fname = fname
1415 self._read(lr)
1415 self._read(lr)
1416
1416
1417 def complete(self):
1417 def complete(self):
1418 return self.text is not None
1418 return self.text is not None
1419
1419
1420 def new(self, lines):
1420 def new(self, lines):
1421 if self.delta:
1421 if self.delta:
1422 return [applybindelta(self.text, ''.join(lines))]
1422 return [applybindelta(self.text, ''.join(lines))]
1423 return [self.text]
1423 return [self.text]
1424
1424
1425 def _read(self, lr):
1425 def _read(self, lr):
1426 def getline(lr, hunk):
1426 def getline(lr, hunk):
1427 l = lr.readline()
1427 l = lr.readline()
1428 hunk.append(l)
1428 hunk.append(l)
1429 return l.rstrip('\r\n')
1429 return l.rstrip('\r\n')
1430
1430
1431 size = 0
1431 size = 0
1432 while True:
1432 while True:
1433 line = getline(lr, self.hunk)
1433 line = getline(lr, self.hunk)
1434 if not line:
1434 if not line:
1435 raise PatchError(_('could not extract "%s" binary data')
1435 raise PatchError(_('could not extract "%s" binary data')
1436 % self._fname)
1436 % self._fname)
1437 if line.startswith('literal '):
1437 if line.startswith('literal '):
1438 size = int(line[8:].rstrip())
1438 size = int(line[8:].rstrip())
1439 break
1439 break
1440 if line.startswith('delta '):
1440 if line.startswith('delta '):
1441 size = int(line[6:].rstrip())
1441 size = int(line[6:].rstrip())
1442 self.delta = True
1442 self.delta = True
1443 break
1443 break
1444 dec = []
1444 dec = []
1445 line = getline(lr, self.hunk)
1445 line = getline(lr, self.hunk)
1446 while len(line) > 1:
1446 while len(line) > 1:
1447 l = line[0]
1447 l = line[0]
1448 if l <= 'Z' and l >= 'A':
1448 if l <= 'Z' and l >= 'A':
1449 l = ord(l) - ord('A') + 1
1449 l = ord(l) - ord('A') + 1
1450 else:
1450 else:
1451 l = ord(l) - ord('a') + 27
1451 l = ord(l) - ord('a') + 27
1452 try:
1452 try:
1453 dec.append(util.b85decode(line[1:])[:l])
1453 dec.append(util.b85decode(line[1:])[:l])
1454 except ValueError as e:
1454 except ValueError as e:
1455 raise PatchError(_('could not decode "%s" binary patch: %s')
1455 raise PatchError(_('could not decode "%s" binary patch: %s')
1456 % (self._fname, str(e)))
1456 % (self._fname, str(e)))
1457 line = getline(lr, self.hunk)
1457 line = getline(lr, self.hunk)
1458 text = zlib.decompress(''.join(dec))
1458 text = zlib.decompress(''.join(dec))
1459 if len(text) != size:
1459 if len(text) != size:
1460 raise PatchError(_('"%s" length is %d bytes, should be %d')
1460 raise PatchError(_('"%s" length is %d bytes, should be %d')
1461 % (self._fname, len(text), size))
1461 % (self._fname, len(text), size))
1462 self.text = text
1462 self.text = text
1463
1463
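# Note on the encoding handled by binhunk._read() above: each data line of a
# "GIT binary patch" starts with a length character, where 'A'-'Z' stand for
# 1-26 payload bytes and 'a'-'z' for 27-52, which is exactly what the ord()
# arithmetic computes before base85-decoding the rest of the line and
# zlib-decompressing the concatenated result. For instance:
#
#   >>> ord('A') - ord('A') + 1, ord('Z') - ord('A') + 1, ord('a') - ord('a') + 27
#   (1, 26, 27)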
1464 def parsefilename(str):
1464 def parsefilename(str):
1465 # --- filename \t|space stuff
1465 # --- filename \t|space stuff
1466 s = str[4:].rstrip('\r\n')
1466 s = str[4:].rstrip('\r\n')
1467 i = s.find('\t')
1467 i = s.find('\t')
1468 if i < 0:
1468 if i < 0:
1469 i = s.find(' ')
1469 i = s.find(' ')
1470 if i < 0:
1470 if i < 0:
1471 return s
1471 return s
1472 return s[:i]
1472 return s[:i]
1473
1473
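# Illustrative examples (not executed as doctests): the first four characters
# ('--- ' or '+++ ') are dropped and anything after the first tab (or, failing
# a tab, the first space) is treated as timestamp noise, so
#
#   parsefilename('--- a/foo.c\t2017-09-17 12:00:00')  would give  'a/foo.c'
#   parsefilename('+++ b/foo.c')                       would give  'b/foo.c'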
1474 def reversehunks(hunks):
1474 def reversehunks(hunks):
1475 '''reverse the signs in the hunks given as argument
1475 '''reverse the signs in the hunks given as argument
1476
1476
1477 This function operates on hunks coming out of patch.filterpatch, that is
1477 This function operates on hunks coming out of patch.filterpatch, that is
1478 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1478 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1479
1479
1480 >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
1480 >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
1481 ... --- a/folder1/g
1481 ... --- a/folder1/g
1482 ... +++ b/folder1/g
1482 ... +++ b/folder1/g
1483 ... @@ -1,7 +1,7 @@
1483 ... @@ -1,7 +1,7 @@
1484 ... +firstline
1484 ... +firstline
1485 ... c
1485 ... c
1486 ... 1
1486 ... 1
1487 ... 2
1487 ... 2
1488 ... + 3
1488 ... + 3
1489 ... -4
1489 ... -4
1490 ... 5
1490 ... 5
1491 ... d
1491 ... d
1492 ... +lastline"""
1492 ... +lastline"""
1493 >>> hunks = parsepatch(rawpatch)
1493 >>> hunks = parsepatch([rawpatch])
1494 >>> hunkscomingfromfilterpatch = []
1494 >>> hunkscomingfromfilterpatch = []
1495 >>> for h in hunks:
1495 >>> for h in hunks:
1496 ... hunkscomingfromfilterpatch.append(h)
1496 ... hunkscomingfromfilterpatch.append(h)
1497 ... hunkscomingfromfilterpatch.extend(h.hunks)
1497 ... hunkscomingfromfilterpatch.extend(h.hunks)
1498
1498
1499 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1499 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1500 >>> from . import util
1500 >>> from . import util
1501 >>> fp = util.stringio()
1501 >>> fp = util.stringio()
1502 >>> for c in reversedhunks:
1502 >>> for c in reversedhunks:
1503 ... c.write(fp)
1503 ... c.write(fp)
1504 >>> fp.seek(0)
1504 >>> fp.seek(0) or None
1505 >>> reversedpatch = fp.read()
1505 >>> reversedpatch = fp.read()
1506 >>> print(pycompat.sysstr(reversedpatch))
1506 >>> print(pycompat.sysstr(reversedpatch))
1507 diff --git a/folder1/g b/folder1/g
1507 diff --git a/folder1/g b/folder1/g
1508 --- a/folder1/g
1508 --- a/folder1/g
1509 +++ b/folder1/g
1509 +++ b/folder1/g
1510 @@ -1,4 +1,3 @@
1510 @@ -1,4 +1,3 @@
1511 -firstline
1511 -firstline
1512 c
1512 c
1513 1
1513 1
1514 2
1514 2
1515 @@ -2,6 +1,6 @@
1515 @@ -2,6 +1,6 @@
1516 c
1516 c
1517 1
1517 1
1518 2
1518 2
1519 - 3
1519 - 3
1520 +4
1520 +4
1521 5
1521 5
1522 d
1522 d
1523 @@ -6,3 +5,2 @@
1523 @@ -6,3 +5,2 @@
1524 5
1524 5
1525 d
1525 d
1526 -lastline
1526 -lastline
1527
1527
1528 '''
1528 '''
1529
1529
1530 newhunks = []
1530 newhunks = []
1531 for c in hunks:
1531 for c in hunks:
1532 if util.safehasattr(c, 'reversehunk'):
1532 if util.safehasattr(c, 'reversehunk'):
1533 c = c.reversehunk()
1533 c = c.reversehunk()
1534 newhunks.append(c)
1534 newhunks.append(c)
1535 return newhunks
1535 return newhunks
1536
1536
1537 def parsepatch(originalchunks, maxcontext=None):
1537 def parsepatch(originalchunks, maxcontext=None):
1538 """patch -> [] of headers -> [] of hunks
1538 """patch -> [] of headers -> [] of hunks
1539
1539
1540 If maxcontext is not None, trim context lines if necessary.
1540 If maxcontext is not None, trim context lines if necessary.
1541
1541
1542 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1542 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1543 ... --- a/folder1/g
1543 ... --- a/folder1/g
1544 ... +++ b/folder1/g
1544 ... +++ b/folder1/g
1545 ... @@ -1,8 +1,10 @@
1545 ... @@ -1,8 +1,10 @@
1546 ... 1
1546 ... 1
1547 ... 2
1547 ... 2
1548 ... -3
1548 ... -3
1549 ... 4
1549 ... 4
1550 ... 5
1550 ... 5
1551 ... 6
1551 ... 6
1552 ... +6.1
1552 ... +6.1
1553 ... +6.2
1553 ... +6.2
1554 ... 7
1554 ... 7
1555 ... 8
1555 ... 8
1556 ... +9'''
1556 ... +9'''
1557 >>> out = util.stringio()
1557 >>> out = util.stringio()
1558 >>> headers = parsepatch([rawpatch], maxcontext=1)
1558 >>> headers = parsepatch([rawpatch], maxcontext=1)
1559 >>> for header in headers:
1559 >>> for header in headers:
1560 ... header.write(out)
1560 ... header.write(out)
1561 ... for hunk in header.hunks:
1561 ... for hunk in header.hunks:
1562 ... hunk.write(out)
1562 ... hunk.write(out)
1563 >>> print(pycompat.sysstr(out.getvalue()))
1563 >>> print(pycompat.sysstr(out.getvalue()))
1564 diff --git a/folder1/g b/folder1/g
1564 diff --git a/folder1/g b/folder1/g
1565 --- a/folder1/g
1565 --- a/folder1/g
1566 +++ b/folder1/g
1566 +++ b/folder1/g
1567 @@ -2,3 +2,2 @@
1567 @@ -2,3 +2,2 @@
1568 2
1568 2
1569 -3
1569 -3
1570 4
1570 4
1571 @@ -6,2 +5,4 @@
1571 @@ -6,2 +5,4 @@
1572 6
1572 6
1573 +6.1
1573 +6.1
1574 +6.2
1574 +6.2
1575 7
1575 7
1576 @@ -8,1 +9,2 @@
1576 @@ -8,1 +9,2 @@
1577 8
1577 8
1578 +9
1578 +9
1579 """
1579 """
1580 class parser(object):
1580 class parser(object):
1581 """patch parsing state machine"""
1581 """patch parsing state machine"""
1582 def __init__(self):
1582 def __init__(self):
1583 self.fromline = 0
1583 self.fromline = 0
1584 self.toline = 0
1584 self.toline = 0
1585 self.proc = ''
1585 self.proc = ''
1586 self.header = None
1586 self.header = None
1587 self.context = []
1587 self.context = []
1588 self.before = []
1588 self.before = []
1589 self.hunk = []
1589 self.hunk = []
1590 self.headers = []
1590 self.headers = []
1591
1591
1592 def addrange(self, limits):
1592 def addrange(self, limits):
1593 fromstart, fromend, tostart, toend, proc = limits
1593 fromstart, fromend, tostart, toend, proc = limits
1594 self.fromline = int(fromstart)
1594 self.fromline = int(fromstart)
1595 self.toline = int(tostart)
1595 self.toline = int(tostart)
1596 self.proc = proc
1596 self.proc = proc
1597
1597
1598 def addcontext(self, context):
1598 def addcontext(self, context):
1599 if self.hunk:
1599 if self.hunk:
1600 h = recordhunk(self.header, self.fromline, self.toline,
1600 h = recordhunk(self.header, self.fromline, self.toline,
1601 self.proc, self.before, self.hunk, context, maxcontext)
1601 self.proc, self.before, self.hunk, context, maxcontext)
1602 self.header.hunks.append(h)
1602 self.header.hunks.append(h)
1603 self.fromline += len(self.before) + h.removed
1603 self.fromline += len(self.before) + h.removed
1604 self.toline += len(self.before) + h.added
1604 self.toline += len(self.before) + h.added
1605 self.before = []
1605 self.before = []
1606 self.hunk = []
1606 self.hunk = []
1607 self.context = context
1607 self.context = context
1608
1608
1609 def addhunk(self, hunk):
1609 def addhunk(self, hunk):
1610 if self.context:
1610 if self.context:
1611 self.before = self.context
1611 self.before = self.context
1612 self.context = []
1612 self.context = []
1613 self.hunk = hunk
1613 self.hunk = hunk
1614
1614
1615 def newfile(self, hdr):
1615 def newfile(self, hdr):
1616 self.addcontext([])
1616 self.addcontext([])
1617 h = header(hdr)
1617 h = header(hdr)
1618 self.headers.append(h)
1618 self.headers.append(h)
1619 self.header = h
1619 self.header = h
1620
1620
1621 def addother(self, line):
1621 def addother(self, line):
1622 pass # 'other' lines are ignored
1622 pass # 'other' lines are ignored
1623
1623
1624 def finished(self):
1624 def finished(self):
1625 self.addcontext([])
1625 self.addcontext([])
1626 return self.headers
1626 return self.headers
1627
1627
1628 transitions = {
1628 transitions = {
1629 'file': {'context': addcontext,
1629 'file': {'context': addcontext,
1630 'file': newfile,
1630 'file': newfile,
1631 'hunk': addhunk,
1631 'hunk': addhunk,
1632 'range': addrange},
1632 'range': addrange},
1633 'context': {'file': newfile,
1633 'context': {'file': newfile,
1634 'hunk': addhunk,
1634 'hunk': addhunk,
1635 'range': addrange,
1635 'range': addrange,
1636 'other': addother},
1636 'other': addother},
1637 'hunk': {'context': addcontext,
1637 'hunk': {'context': addcontext,
1638 'file': newfile,
1638 'file': newfile,
1639 'range': addrange},
1639 'range': addrange},
1640 'range': {'context': addcontext,
1640 'range': {'context': addcontext,
1641 'hunk': addhunk},
1641 'hunk': addhunk},
1642 'other': {'other': addother},
1642 'other': {'other': addother},
1643 }
1643 }
1644
1644
1645 p = parser()
1645 p = parser()
1646 fp = stringio()
1646 fp = stringio()
1647 fp.write(''.join(originalchunks))
1647 fp.write(''.join(originalchunks))
1648 fp.seek(0)
1648 fp.seek(0)
1649
1649
1650 state = 'context'
1650 state = 'context'
1651 for newstate, data in scanpatch(fp):
1651 for newstate, data in scanpatch(fp):
1652 try:
1652 try:
1653 p.transitions[state][newstate](p, data)
1653 p.transitions[state][newstate](p, data)
1654 except KeyError:
1654 except KeyError:
1655 raise PatchError('unhandled transition: %s -> %s' %
1655 raise PatchError('unhandled transition: %s -> %s' %
1656 (state, newstate))
1656 (state, newstate))
1657 state = newstate
1657 state = newstate
1658 del fp
1658 del fp
1659 return p.finished()
1659 return p.finished()
1660
1660
1661 def pathtransform(path, strip, prefix):
1661 def pathtransform(path, strip, prefix):
1662 '''turn a path from a patch into a path suitable for the repository
1662 '''turn a path from a patch into a path suitable for the repository
1663
1663
1664 prefix, if not empty, is expected to be normalized with a / at the end.
1664 prefix, if not empty, is expected to be normalized with a / at the end.
1665
1665
1666 Returns (stripped components, path in repository).
1666 Returns (stripped components, path in repository).
1667
1667
1668 >>> pathtransform(b'a/b/c', 0, b'')
1668 >>> pathtransform(b'a/b/c', 0, b'')
1669 ('', 'a/b/c')
1669 ('', 'a/b/c')
1670 >>> pathtransform(b' a/b/c ', 0, b'')
1670 >>> pathtransform(b' a/b/c ', 0, b'')
1671 ('', ' a/b/c')
1671 ('', ' a/b/c')
1672 >>> pathtransform(b' a/b/c ', 2, b'')
1672 >>> pathtransform(b' a/b/c ', 2, b'')
1673 ('a/b/', 'c')
1673 ('a/b/', 'c')
1674 >>> pathtransform(b'a/b/c', 0, b'd/e/')
1674 >>> pathtransform(b'a/b/c', 0, b'd/e/')
1675 ('', 'd/e/a/b/c')
1675 ('', 'd/e/a/b/c')
1676 >>> pathtransform(b' a//b/c ', 2, b'd/e/')
1676 >>> pathtransform(b' a//b/c ', 2, b'd/e/')
1677 ('a//b/', 'd/e/c')
1677 ('a//b/', 'd/e/c')
1678 >>> pathtransform(b'a/b/c', 3, b'')
1678 >>> pathtransform(b'a/b/c', 3, b'')
1679 Traceback (most recent call last):
1679 Traceback (most recent call last):
1680 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1680 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1681 '''
1681 '''
1682 pathlen = len(path)
1682 pathlen = len(path)
1683 i = 0
1683 i = 0
1684 if strip == 0:
1684 if strip == 0:
1685 return '', prefix + path.rstrip()
1685 return '', prefix + path.rstrip()
1686 count = strip
1686 count = strip
1687 while count > 0:
1687 while count > 0:
1688 i = path.find('/', i)
1688 i = path.find('/', i)
1689 if i == -1:
1689 if i == -1:
1690 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1690 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1691 (count, strip, path))
1691 (count, strip, path))
1692 i += 1
1692 i += 1
1693 # consume '//' in the path
1693 # consume '//' in the path
1694 while i < pathlen - 1 and path[i:i + 1] == '/':
1694 while i < pathlen - 1 and path[i:i + 1] == '/':
1695 i += 1
1695 i += 1
1696 count -= 1
1696 count -= 1
1697 return path[:i].lstrip(), prefix + path[i:].rstrip()
1697 return path[:i].lstrip(), prefix + path[i:].rstrip()
1698
1698
1699 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1699 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1700 nulla = afile_orig == "/dev/null"
1700 nulla = afile_orig == "/dev/null"
1701 nullb = bfile_orig == "/dev/null"
1701 nullb = bfile_orig == "/dev/null"
1702 create = nulla and hunk.starta == 0 and hunk.lena == 0
1702 create = nulla and hunk.starta == 0 and hunk.lena == 0
1703 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1703 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1704 abase, afile = pathtransform(afile_orig, strip, prefix)
1704 abase, afile = pathtransform(afile_orig, strip, prefix)
1705 gooda = not nulla and backend.exists(afile)
1705 gooda = not nulla and backend.exists(afile)
1706 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1706 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1707 if afile == bfile:
1707 if afile == bfile:
1708 goodb = gooda
1708 goodb = gooda
1709 else:
1709 else:
1710 goodb = not nullb and backend.exists(bfile)
1710 goodb = not nullb and backend.exists(bfile)
1711 missing = not goodb and not gooda and not create
1711 missing = not goodb and not gooda and not create
1712
1712
1713 # some diff programs apparently produce patches where the afile is
1713 # some diff programs apparently produce patches where the afile is
1714 # not /dev/null, but afile starts with bfile
1714 # not /dev/null, but afile starts with bfile
1715 abasedir = afile[:afile.rfind('/') + 1]
1715 abasedir = afile[:afile.rfind('/') + 1]
1716 bbasedir = bfile[:bfile.rfind('/') + 1]
1716 bbasedir = bfile[:bfile.rfind('/') + 1]
1717 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1717 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1718 and hunk.starta == 0 and hunk.lena == 0):
1718 and hunk.starta == 0 and hunk.lena == 0):
1719 create = True
1719 create = True
1720 missing = False
1720 missing = False
1721
1721
1722 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1722 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1723 # diff is between a file and its backup. In this case, the original
1723 # diff is between a file and its backup. In this case, the original
1724 # file should be patched (see original mpatch code).
1724 # file should be patched (see original mpatch code).
1725 isbackup = (abase == bbase and bfile.startswith(afile))
1725 isbackup = (abase == bbase and bfile.startswith(afile))
1726 fname = None
1726 fname = None
1727 if not missing:
1727 if not missing:
1728 if gooda and goodb:
1728 if gooda and goodb:
1729 if isbackup:
1729 if isbackup:
1730 fname = afile
1730 fname = afile
1731 else:
1731 else:
1732 fname = bfile
1732 fname = bfile
1733 elif gooda:
1733 elif gooda:
1734 fname = afile
1734 fname = afile
1735
1735
1736 if not fname:
1736 if not fname:
1737 if not nullb:
1737 if not nullb:
1738 if isbackup:
1738 if isbackup:
1739 fname = afile
1739 fname = afile
1740 else:
1740 else:
1741 fname = bfile
1741 fname = bfile
1742 elif not nulla:
1742 elif not nulla:
1743 fname = afile
1743 fname = afile
1744 else:
1744 else:
1745 raise PatchError(_("undefined source and destination files"))
1745 raise PatchError(_("undefined source and destination files"))
1746
1746
1747 gp = patchmeta(fname)
1747 gp = patchmeta(fname)
1748 if create:
1748 if create:
1749 gp.op = 'ADD'
1749 gp.op = 'ADD'
1750 elif remove:
1750 elif remove:
1751 gp.op = 'DELETE'
1751 gp.op = 'DELETE'
1752 return gp
1752 return gp
1753
1753
1754 def scanpatch(fp):
1754 def scanpatch(fp):
1755 """like patch.iterhunks, but yield different events
1755 """like patch.iterhunks, but yield different events
1756
1756
1757 - ('file', [header_lines + fromfile + tofile])
1757 - ('file', [header_lines + fromfile + tofile])
1758 - ('context', [context_lines])
1758 - ('context', [context_lines])
1759 - ('hunk', [hunk_lines])
1759 - ('hunk', [hunk_lines])
1760 - ('range', (-start,len, +start,len, proc))
1760 - ('range', (-start,len, +start,len, proc))
1761 """
1761 """
1762 lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1762 lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1763 lr = linereader(fp)
1763 lr = linereader(fp)
1764
1764
1765 def scanwhile(first, p):
1765 def scanwhile(first, p):
1766 """scan lr while predicate holds"""
1766 """scan lr while predicate holds"""
1767 lines = [first]
1767 lines = [first]
1768 for line in iter(lr.readline, ''):
1768 for line in iter(lr.readline, ''):
1769 if p(line):
1769 if p(line):
1770 lines.append(line)
1770 lines.append(line)
1771 else:
1771 else:
1772 lr.push(line)
1772 lr.push(line)
1773 break
1773 break
1774 return lines
1774 return lines
1775
1775
1776 for line in iter(lr.readline, ''):
1776 for line in iter(lr.readline, ''):
1777 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1777 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1778 def notheader(line):
1778 def notheader(line):
1779 s = line.split(None, 1)
1779 s = line.split(None, 1)
1780 return not s or s[0] not in ('---', 'diff')
1780 return not s or s[0] not in ('---', 'diff')
1781 header = scanwhile(line, notheader)
1781 header = scanwhile(line, notheader)
1782 fromfile = lr.readline()
1782 fromfile = lr.readline()
1783 if fromfile.startswith('---'):
1783 if fromfile.startswith('---'):
1784 tofile = lr.readline()
1784 tofile = lr.readline()
1785 header += [fromfile, tofile]
1785 header += [fromfile, tofile]
1786 else:
1786 else:
1787 lr.push(fromfile)
1787 lr.push(fromfile)
1788 yield 'file', header
1788 yield 'file', header
1789 elif line[0:1] == ' ':
1789 elif line[0:1] == ' ':
1790 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1790 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1791 elif line[0] in '-+':
1791 elif line[0] in '-+':
1792 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1792 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1793 else:
1793 else:
1794 m = lines_re.match(line)
1794 m = lines_re.match(line)
1795 if m:
1795 if m:
1796 yield 'range', m.groups()
1796 yield 'range', m.groups()
1797 else:
1797 else:
1798 yield 'other', line
1798 yield 'other', line
1799
1799
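# Illustrative sketch: for a minimal git-style patch such as
#
#   diff --git a/x b/x
#   --- a/x
#   +++ b/x
#   @@ -1,2 +1,2 @@
#    a
#   -b
#   +c
#
# scanpatch() would yield, roughly:
#
#   ('file', [<the three header lines>])
#   ('range', ('1', '2', '1', '2', ''))
#   ('context', [' a\n'])
#   ('hunk', ['-b\n', '+c\n'])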
1800 def scangitpatch(lr, firstline):
1800 def scangitpatch(lr, firstline):
1801 """
1801 """
1802 Git patches can emit:
1802 Git patches can emit:
1803 - rename a to b
1803 - rename a to b
1804 - change b
1804 - change b
1805 - copy a to c
1805 - copy a to c
1806 - change c
1806 - change c
1807
1807
1808 We cannot apply this sequence as-is: the renamed 'a' could not be
1808 We cannot apply this sequence as-is: the renamed 'a' could not be
1809 found because it would have been renamed already. And we cannot copy
1809 found because it would have been renamed already. And we cannot copy
1810 from 'b' instead because 'b' would have been changed already. So
1810 from 'b' instead because 'b' would have been changed already. So
1811 we scan the git patch for copy and rename commands so we can
1811 we scan the git patch for copy and rename commands so we can
1812 perform the copies ahead of time.
1812 perform the copies ahead of time.
1813 """
1813 """
1814 pos = 0
1814 pos = 0
1815 try:
1815 try:
1816 pos = lr.fp.tell()
1816 pos = lr.fp.tell()
1817 fp = lr.fp
1817 fp = lr.fp
1818 except IOError:
1818 except IOError:
1819 fp = stringio(lr.fp.read())
1819 fp = stringio(lr.fp.read())
1820 gitlr = linereader(fp)
1820 gitlr = linereader(fp)
1821 gitlr.push(firstline)
1821 gitlr.push(firstline)
1822 gitpatches = readgitpatch(gitlr)
1822 gitpatches = readgitpatch(gitlr)
1823 fp.seek(pos)
1823 fp.seek(pos)
1824 return gitpatches
1824 return gitpatches
1825
1825
1826 def iterhunks(fp):
1826 def iterhunks(fp):
1827 """Read a patch and yield the following events:
1827 """Read a patch and yield the following events:
1828 - ("file", afile, bfile, firsthunk): select a new target file.
1828 - ("file", afile, bfile, firsthunk): select a new target file.
1829 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1829 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1830 "file" event.
1830 "file" event.
1831 - ("git", gitchanges): current diff is in git format, gitchanges
1831 - ("git", gitchanges): current diff is in git format, gitchanges
1832 maps filenames to gitpatch records. Unique event.
1832 maps filenames to gitpatch records. Unique event.
1833 """
1833 """
1834 afile = ""
1834 afile = ""
1835 bfile = ""
1835 bfile = ""
1836 state = None
1836 state = None
1837 hunknum = 0
1837 hunknum = 0
1838 emitfile = newfile = False
1838 emitfile = newfile = False
1839 gitpatches = None
1839 gitpatches = None
1840
1840
1841 # our states
1841 # our states
1842 BFILE = 1
1842 BFILE = 1
1843 context = None
1843 context = None
1844 lr = linereader(fp)
1844 lr = linereader(fp)
1845
1845
1846 for x in iter(lr.readline, ''):
1846 for x in iter(lr.readline, ''):
1847 if state == BFILE and (
1847 if state == BFILE and (
1848 (not context and x[0] == '@')
1848 (not context and x[0] == '@')
1849 or (context is not False and x.startswith('***************'))
1849 or (context is not False and x.startswith('***************'))
1850 or x.startswith('GIT binary patch')):
1850 or x.startswith('GIT binary patch')):
1851 gp = None
1851 gp = None
1852 if (gitpatches and
1852 if (gitpatches and
1853 gitpatches[-1].ispatching(afile, bfile)):
1853 gitpatches[-1].ispatching(afile, bfile)):
1854 gp = gitpatches.pop()
1854 gp = gitpatches.pop()
1855 if x.startswith('GIT binary patch'):
1855 if x.startswith('GIT binary patch'):
1856 h = binhunk(lr, gp.path)
1856 h = binhunk(lr, gp.path)
1857 else:
1857 else:
1858 if context is None and x.startswith('***************'):
1858 if context is None and x.startswith('***************'):
1859 context = True
1859 context = True
1860 h = hunk(x, hunknum + 1, lr, context)
1860 h = hunk(x, hunknum + 1, lr, context)
1861 hunknum += 1
1861 hunknum += 1
1862 if emitfile:
1862 if emitfile:
1863 emitfile = False
1863 emitfile = False
1864 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1864 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1865 yield 'hunk', h
1865 yield 'hunk', h
1866 elif x.startswith('diff --git a/'):
1866 elif x.startswith('diff --git a/'):
1867 m = gitre.match(x.rstrip(' \r\n'))
1867 m = gitre.match(x.rstrip(' \r\n'))
1868 if not m:
1868 if not m:
1869 continue
1869 continue
1870 if gitpatches is None:
1870 if gitpatches is None:
1871 # scan whole input for git metadata
1871 # scan whole input for git metadata
1872 gitpatches = scangitpatch(lr, x)
1872 gitpatches = scangitpatch(lr, x)
1873 yield 'git', [g.copy() for g in gitpatches
1873 yield 'git', [g.copy() for g in gitpatches
1874 if g.op in ('COPY', 'RENAME')]
1874 if g.op in ('COPY', 'RENAME')]
1875 gitpatches.reverse()
1875 gitpatches.reverse()
1876 afile = 'a/' + m.group(1)
1876 afile = 'a/' + m.group(1)
1877 bfile = 'b/' + m.group(2)
1877 bfile = 'b/' + m.group(2)
1878 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1878 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1879 gp = gitpatches.pop()
1879 gp = gitpatches.pop()
1880 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1880 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1881 if not gitpatches:
1881 if not gitpatches:
1882 raise PatchError(_('failed to synchronize metadata for "%s"')
1882 raise PatchError(_('failed to synchronize metadata for "%s"')
1883 % afile[2:])
1883 % afile[2:])
1884 gp = gitpatches[-1]
1884 gp = gitpatches[-1]
1885 newfile = True
1885 newfile = True
1886 elif x.startswith('---'):
1886 elif x.startswith('---'):
1887 # check for a unified diff
1887 # check for a unified diff
1888 l2 = lr.readline()
1888 l2 = lr.readline()
1889 if not l2.startswith('+++'):
1889 if not l2.startswith('+++'):
1890 lr.push(l2)
1890 lr.push(l2)
1891 continue
1891 continue
1892 newfile = True
1892 newfile = True
1893 context = False
1893 context = False
1894 afile = parsefilename(x)
1894 afile = parsefilename(x)
1895 bfile = parsefilename(l2)
1895 bfile = parsefilename(l2)
1896 elif x.startswith('***'):
1896 elif x.startswith('***'):
1897 # check for a context diff
1897 # check for a context diff
1898 l2 = lr.readline()
1898 l2 = lr.readline()
1899 if not l2.startswith('---'):
1899 if not l2.startswith('---'):
1900 lr.push(l2)
1900 lr.push(l2)
1901 continue
1901 continue
1902 l3 = lr.readline()
1902 l3 = lr.readline()
1903 lr.push(l3)
1903 lr.push(l3)
1904 if not l3.startswith("***************"):
1904 if not l3.startswith("***************"):
1905 lr.push(l2)
1905 lr.push(l2)
1906 continue
1906 continue
1907 newfile = True
1907 newfile = True
1908 context = True
1908 context = True
1909 afile = parsefilename(x)
1909 afile = parsefilename(x)
1910 bfile = parsefilename(l2)
1910 bfile = parsefilename(l2)
1911
1911
1912 if newfile:
1912 if newfile:
1913 newfile = False
1913 newfile = False
1914 emitfile = True
1914 emitfile = True
1915 state = BFILE
1915 state = BFILE
1916 hunknum = 0
1916 hunknum = 0
1917
1917
1918 while gitpatches:
1918 while gitpatches:
1919 gp = gitpatches.pop()
1919 gp = gitpatches.pop()
1920 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1920 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1921
1921
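# Illustrative sketch: unlike scanpatch(), the iterhunks() generator above
# yields parsed objects rather than raw lines. For a minimal git-style patch
# with a single hunk modifying file 'x', the event stream would look roughly
# like
#
#   ('git', [])          # copy/rename metadata only, empty for a plain edit
#   ('file', ('a/x', 'b/x', <hunk object>, <patchmeta or None>))
#   ('hunk', <hunk object>)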
1922 def applybindelta(binchunk, data):
1922 def applybindelta(binchunk, data):
1923 """Apply a binary delta hunk
1923 """Apply a binary delta hunk
1924 The algorithm used is the one from git's patch-delta.c.
1924 The algorithm used is the one from git's patch-delta.c.
1925 """
1925 """
1926 def deltahead(binchunk):
1926 def deltahead(binchunk):
1927 i = 0
1927 i = 0
1928 for c in binchunk:
1928 for c in binchunk:
1929 i += 1
1929 i += 1
1930 if not (ord(c) & 0x80):
1930 if not (ord(c) & 0x80):
1931 return i
1931 return i
1932 return i
1932 return i
1933 out = ""
1933 out = ""
1934 s = deltahead(binchunk)
1934 s = deltahead(binchunk)
1935 binchunk = binchunk[s:]
1935 binchunk = binchunk[s:]
1936 s = deltahead(binchunk)
1936 s = deltahead(binchunk)
1937 binchunk = binchunk[s:]
1937 binchunk = binchunk[s:]
1938 i = 0
1938 i = 0
1939 while i < len(binchunk):
1939 while i < len(binchunk):
1940 cmd = ord(binchunk[i])
1940 cmd = ord(binchunk[i])
1941 i += 1
1941 i += 1
1942 if (cmd & 0x80):
1942 if (cmd & 0x80):
1943 offset = 0
1943 offset = 0
1944 size = 0
1944 size = 0
1945 if (cmd & 0x01):
1945 if (cmd & 0x01):
1946 offset = ord(binchunk[i])
1946 offset = ord(binchunk[i])
1947 i += 1
1947 i += 1
1948 if (cmd & 0x02):
1948 if (cmd & 0x02):
1949 offset |= ord(binchunk[i]) << 8
1949 offset |= ord(binchunk[i]) << 8
1950 i += 1
1950 i += 1
1951 if (cmd & 0x04):
1951 if (cmd & 0x04):
1952 offset |= ord(binchunk[i]) << 16
1952 offset |= ord(binchunk[i]) << 16
1953 i += 1
1953 i += 1
1954 if (cmd & 0x08):
1954 if (cmd & 0x08):
1955 offset |= ord(binchunk[i]) << 24
1955 offset |= ord(binchunk[i]) << 24
1956 i += 1
1956 i += 1
1957 if (cmd & 0x10):
1957 if (cmd & 0x10):
1958 size = ord(binchunk[i])
1958 size = ord(binchunk[i])
1959 i += 1
1959 i += 1
1960 if (cmd & 0x20):
1960 if (cmd & 0x20):
1961 size |= ord(binchunk[i]) << 8
1961 size |= ord(binchunk[i]) << 8
1962 i += 1
1962 i += 1
1963 if (cmd & 0x40):
1963 if (cmd & 0x40):
1964 size |= ord(binchunk[i]) << 16
1964 size |= ord(binchunk[i]) << 16
1965 i += 1
1965 i += 1
1966 if size == 0:
1966 if size == 0:
1967 size = 0x10000
1967 size = 0x10000
1968 offset_end = offset + size
1968 offset_end = offset + size
1969 out += data[offset:offset_end]
1969 out += data[offset:offset_end]
1970 elif cmd != 0:
1970 elif cmd != 0:
1971 offset_end = i + cmd
1971 offset_end = i + cmd
1972 out += binchunk[i:offset_end]
1972 out += binchunk[i:offset_end]
1973 i += cmd
1973 i += cmd
1974 else:
1974 else:
1975 raise PatchError(_('unexpected delta opcode 0'))
1975 raise PatchError(_('unexpected delta opcode 0'))
1976 return out
1976 return out
1977
1977
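# Worked example (hypothetical bytes): after the two varint-style size headers
# that deltahead() skips, every opcode byte either copies from the source data
# or inserts literal bytes from the delta itself:
#
#   0x93 = 0x80 | 0x10 | 0x02 | 0x01   copy: read one low offset byte, one
#                                      offset byte shifted left by 8, and one
#                                      size byte, then append
#                                      data[offset:offset + size]
#   0x05                               insert: the next 5 bytes of the delta
#                                      are appended verbatim
#   0x00                               invalid, raises PatchError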
1978 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1978 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1979 """Reads a patch from fp and tries to apply it.
1979 """Reads a patch from fp and tries to apply it.
1980
1980
1981 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1981 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1982 there was any fuzz.
1982 there was any fuzz.
1983
1983
1984 If 'eolmode' is 'strict', the patch content and patched file are
1984 If 'eolmode' is 'strict', the patch content and patched file are
1985 read in binary mode. Otherwise, line endings are ignored when
1985 read in binary mode. Otherwise, line endings are ignored when
1986 patching then normalized according to 'eolmode'.
1986 patching then normalized according to 'eolmode'.
1987 """
1987 """
1988 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1988 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1989 prefix=prefix, eolmode=eolmode)
1989 prefix=prefix, eolmode=eolmode)
1990
1990
1991 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1991 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1992 eolmode='strict'):
1992 eolmode='strict'):
1993
1993
1994 if prefix:
1994 if prefix:
1995 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1995 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1996 prefix)
1996 prefix)
1997 if prefix != '':
1997 if prefix != '':
1998 prefix += '/'
1998 prefix += '/'
1999 def pstrip(p):
1999 def pstrip(p):
2000 return pathtransform(p, strip - 1, prefix)[1]
2000 return pathtransform(p, strip - 1, prefix)[1]
2001
2001
2002 rejects = 0
2002 rejects = 0
2003 err = 0
2003 err = 0
2004 current_file = None
2004 current_file = None
2005
2005
2006 for state, values in iterhunks(fp):
2006 for state, values in iterhunks(fp):
2007 if state == 'hunk':
2007 if state == 'hunk':
2008 if not current_file:
2008 if not current_file:
2009 continue
2009 continue
2010 ret = current_file.apply(values)
2010 ret = current_file.apply(values)
2011 if ret > 0:
2011 if ret > 0:
2012 err = 1
2012 err = 1
2013 elif state == 'file':
2013 elif state == 'file':
2014 if current_file:
2014 if current_file:
2015 rejects += current_file.close()
2015 rejects += current_file.close()
2016 current_file = None
2016 current_file = None
2017 afile, bfile, first_hunk, gp = values
2017 afile, bfile, first_hunk, gp = values
2018 if gp:
2018 if gp:
2019 gp.path = pstrip(gp.path)
2019 gp.path = pstrip(gp.path)
2020 if gp.oldpath:
2020 if gp.oldpath:
2021 gp.oldpath = pstrip(gp.oldpath)
2021 gp.oldpath = pstrip(gp.oldpath)
2022 else:
2022 else:
2023 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2023 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2024 prefix)
2024 prefix)
2025 if gp.op == 'RENAME':
2025 if gp.op == 'RENAME':
2026 backend.unlink(gp.oldpath)
2026 backend.unlink(gp.oldpath)
2027 if not first_hunk:
2027 if not first_hunk:
2028 if gp.op == 'DELETE':
2028 if gp.op == 'DELETE':
2029 backend.unlink(gp.path)
2029 backend.unlink(gp.path)
2030 continue
2030 continue
2031 data, mode = None, None
2031 data, mode = None, None
2032 if gp.op in ('RENAME', 'COPY'):
2032 if gp.op in ('RENAME', 'COPY'):
2033 data, mode = store.getfile(gp.oldpath)[:2]
2033 data, mode = store.getfile(gp.oldpath)[:2]
2034 if data is None:
2034 if data is None:
2035 # This means that the old path does not exist
2035 # This means that the old path does not exist
2036 raise PatchError(_("source file '%s' does not exist")
2036 raise PatchError(_("source file '%s' does not exist")
2037 % gp.oldpath)
2037 % gp.oldpath)
2038 if gp.mode:
2038 if gp.mode:
2039 mode = gp.mode
2039 mode = gp.mode
2040 if gp.op == 'ADD':
2040 if gp.op == 'ADD':
2041 # Added files without content have no hunk and
2041 # Added files without content have no hunk and
2042 # must be created
2042 # must be created
2043 data = ''
2043 data = ''
2044 if data or mode:
2044 if data or mode:
2045 if (gp.op in ('ADD', 'RENAME', 'COPY')
2045 if (gp.op in ('ADD', 'RENAME', 'COPY')
2046 and backend.exists(gp.path)):
2046 and backend.exists(gp.path)):
2047 raise PatchError(_("cannot create %s: destination "
2047 raise PatchError(_("cannot create %s: destination "
2048 "already exists") % gp.path)
2048 "already exists") % gp.path)
2049 backend.setfile(gp.path, data, mode, gp.oldpath)
2049 backend.setfile(gp.path, data, mode, gp.oldpath)
2050 continue
2050 continue
2051 try:
2051 try:
2052 current_file = patcher(ui, gp, backend, store,
2052 current_file = patcher(ui, gp, backend, store,
2053 eolmode=eolmode)
2053 eolmode=eolmode)
2054 except PatchError as inst:
2054 except PatchError as inst:
2055 ui.warn(str(inst) + '\n')
2055 ui.warn(str(inst) + '\n')
2056 current_file = None
2056 current_file = None
2057 rejects += 1
2057 rejects += 1
2058 continue
2058 continue
2059 elif state == 'git':
2059 elif state == 'git':
2060 for gp in values:
2060 for gp in values:
2061 path = pstrip(gp.oldpath)
2061 path = pstrip(gp.oldpath)
2062 data, mode = backend.getfile(path)
2062 data, mode = backend.getfile(path)
2063 if data is None:
2063 if data is None:
2064 # The error ignored here will trigger a getfile()
2064 # The error ignored here will trigger a getfile()
2065 # error in a place more appropriate for error
2065 # error in a place more appropriate for error
2066 # handling, and will not interrupt the patching
2066 # handling, and will not interrupt the patching
2067 # process.
2067 # process.
2068 pass
2068 pass
2069 else:
2069 else:
2070 store.setfile(path, data, mode)
2070 store.setfile(path, data, mode)
2071 else:
2071 else:
2072 raise error.Abort(_('unsupported parser state: %s') % state)
2072 raise error.Abort(_('unsupported parser state: %s') % state)
2073
2073
2074 if current_file:
2074 if current_file:
2075 rejects += current_file.close()
2075 rejects += current_file.close()
2076
2076
2077 if rejects:
2077 if rejects:
2078 return -1
2078 return -1
2079 return err
2079 return err
2080
2080
2081 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2081 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2082 similarity):
2082 similarity):
2083 """use <patcher> to apply <patchname> to the working directory.
2083 """use <patcher> to apply <patchname> to the working directory.
2084 returns whether patch was applied with fuzz factor."""
2084 returns whether patch was applied with fuzz factor."""
2085
2085
2086 fuzz = False
2086 fuzz = False
2087 args = []
2087 args = []
2088 cwd = repo.root
2088 cwd = repo.root
2089 if cwd:
2089 if cwd:
2090 args.append('-d %s' % util.shellquote(cwd))
2090 args.append('-d %s' % util.shellquote(cwd))
2091 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
2091 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
2092 util.shellquote(patchname)))
2092 util.shellquote(patchname)))
2093 try:
2093 try:
2094 for line in util.iterfile(fp):
2094 for line in util.iterfile(fp):
2095 line = line.rstrip()
2095 line = line.rstrip()
2096 ui.note(line + '\n')
2096 ui.note(line + '\n')
2097 if line.startswith('patching file '):
2097 if line.startswith('patching file '):
2098 pf = util.parsepatchoutput(line)
2098 pf = util.parsepatchoutput(line)
2099 printed_file = False
2099 printed_file = False
2100 files.add(pf)
2100 files.add(pf)
2101 elif line.find('with fuzz') >= 0:
2101 elif line.find('with fuzz') >= 0:
2102 fuzz = True
2102 fuzz = True
2103 if not printed_file:
2103 if not printed_file:
2104 ui.warn(pf + '\n')
2104 ui.warn(pf + '\n')
2105 printed_file = True
2105 printed_file = True
2106 ui.warn(line + '\n')
2106 ui.warn(line + '\n')
2107 elif line.find('saving rejects to file') >= 0:
2107 elif line.find('saving rejects to file') >= 0:
2108 ui.warn(line + '\n')
2108 ui.warn(line + '\n')
2109 elif line.find('FAILED') >= 0:
2109 elif line.find('FAILED') >= 0:
2110 if not printed_file:
2110 if not printed_file:
2111 ui.warn(pf + '\n')
2111 ui.warn(pf + '\n')
2112 printed_file = True
2112 printed_file = True
2113 ui.warn(line + '\n')
2113 ui.warn(line + '\n')
2114 finally:
2114 finally:
2115 if files:
2115 if files:
2116 scmutil.marktouched(repo, files, similarity)
2116 scmutil.marktouched(repo, files, similarity)
2117 code = fp.close()
2117 code = fp.close()
2118 if code:
2118 if code:
2119 raise PatchError(_("patch command failed: %s") %
2119 raise PatchError(_("patch command failed: %s") %
2120 util.explainexit(code)[0])
2120 util.explainexit(code)[0])
2121 return fuzz
2121 return fuzz
2122
2122
2123 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2123 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2124 eolmode='strict'):
2124 eolmode='strict'):
2125 if files is None:
2125 if files is None:
2126 files = set()
2126 files = set()
2127 if eolmode is None:
2127 if eolmode is None:
2128 eolmode = ui.config('patch', 'eol')
2128 eolmode = ui.config('patch', 'eol')
2129 if eolmode.lower() not in eolmodes:
2129 if eolmode.lower() not in eolmodes:
2130 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2130 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2131 eolmode = eolmode.lower()
2131 eolmode = eolmode.lower()
2132
2132
2133 store = filestore()
2133 store = filestore()
2134 try:
2134 try:
2135 fp = open(patchobj, 'rb')
2135 fp = open(patchobj, 'rb')
2136 except TypeError:
2136 except TypeError:
2137 fp = patchobj
2137 fp = patchobj
2138 try:
2138 try:
2139 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2139 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2140 eolmode=eolmode)
2140 eolmode=eolmode)
2141 finally:
2141 finally:
2142 if fp != patchobj:
2142 if fp != patchobj:
2143 fp.close()
2143 fp.close()
2144 files.update(backend.close())
2144 files.update(backend.close())
2145 store.close()
2145 store.close()
2146 if ret < 0:
2146 if ret < 0:
2147 raise PatchError(_('patch failed to apply'))
2147 raise PatchError(_('patch failed to apply'))
2148 return ret > 0
2148 return ret > 0
2149
2149
2150 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2150 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2151 eolmode='strict', similarity=0):
2151 eolmode='strict', similarity=0):
2152 """use builtin patch to apply <patchobj> to the working directory.
2152 """use builtin patch to apply <patchobj> to the working directory.
2153 returns whether patch was applied with fuzz factor."""
2153 returns whether patch was applied with fuzz factor."""
2154 backend = workingbackend(ui, repo, similarity)
2154 backend = workingbackend(ui, repo, similarity)
2155 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2155 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2156
2156
2157 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2157 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2158 eolmode='strict'):
2158 eolmode='strict'):
2159 backend = repobackend(ui, repo, ctx, store)
2159 backend = repobackend(ui, repo, ctx, store)
2160 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2160 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2161
2161
2162 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2162 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2163 similarity=0):
2163 similarity=0):
2164 """Apply <patchname> to the working directory.
2164 """Apply <patchname> to the working directory.
2165
2165
2166 'eolmode' specifies how end of lines should be handled. It can be:
2166 'eolmode' specifies how end of lines should be handled. It can be:
2167 - 'strict': inputs are read in binary mode, EOLs are preserved
2167 - 'strict': inputs are read in binary mode, EOLs are preserved
2168 - 'crlf': EOLs are ignored when patching and reset to CRLF
2168 - 'crlf': EOLs are ignored when patching and reset to CRLF
2169 - 'lf': EOLs are ignored when patching and reset to LF
2169 - 'lf': EOLs are ignored when patching and reset to LF
2170 - None: get it from user settings, default to 'strict'
2170 - None: get it from user settings, default to 'strict'
2171 'eolmode' is ignored when using an external patcher program.
2171 'eolmode' is ignored when using an external patcher program.
2172
2172
2173 Returns whether patch was applied with fuzz factor.
2173 Returns whether patch was applied with fuzz factor.
2174 """
2174 """
2175 patcher = ui.config('ui', 'patch')
2175 patcher = ui.config('ui', 'patch')
2176 if files is None:
2176 if files is None:
2177 files = set()
2177 files = set()
2178 if patcher:
2178 if patcher:
2179 return _externalpatch(ui, repo, patcher, patchname, strip,
2179 return _externalpatch(ui, repo, patcher, patchname, strip,
2180 files, similarity)
2180 files, similarity)
2181 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2181 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2182 similarity)
2182 similarity)
2183
2183
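# Illustrative usage sketch (not from the Mercurial sources): applying a
# patch file with the builtin patcher.  The names `myui`, `repo` and the
# path 'fix.patch' are placeholders, e.g. a ui and a localrepository
# obtained through the usual mercurial entry points.
def _patchusageexample(myui, repo):
    try:
        fuzz = patch(myui, repo, 'fix.patch', strip=1, eolmode='strict')
        if fuzz:
            myui.warn('patch applied with fuzz\n')
    except PatchError as inst:
        myui.warn('patch failed: %s\n' % inst)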
2184 def changedfiles(ui, repo, patchpath, strip=1):
2184 def changedfiles(ui, repo, patchpath, strip=1):
2185 backend = fsbackend(ui, repo.root)
2185 backend = fsbackend(ui, repo.root)
2186 with open(patchpath, 'rb') as fp:
2186 with open(patchpath, 'rb') as fp:
2187 changed = set()
2187 changed = set()
2188 for state, values in iterhunks(fp):
2188 for state, values in iterhunks(fp):
2189 if state == 'file':
2189 if state == 'file':
2190 afile, bfile, first_hunk, gp = values
2190 afile, bfile, first_hunk, gp = values
2191 if gp:
2191 if gp:
2192 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2192 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2193 if gp.oldpath:
2193 if gp.oldpath:
2194 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2194 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2195 else:
2195 else:
2196 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2196 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2197 '')
2197 '')
2198 changed.add(gp.path)
2198 changed.add(gp.path)
2199 if gp.op == 'RENAME':
2199 if gp.op == 'RENAME':
2200 changed.add(gp.oldpath)
2200 changed.add(gp.oldpath)
2201 elif state not in ('hunk', 'git'):
2201 elif state not in ('hunk', 'git'):
2202 raise error.Abort(_('unsupported parser state: %s') % state)
2202 raise error.Abort(_('unsupported parser state: %s') % state)
2203 return changed
2203 return changed
2204
2204
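# Illustrative sketch (not from the sources): changedfiles() only parses the
# patch, so it can preview which paths would be touched without applying
# anything.  `myui`, `repo` and 'fix.patch' are placeholders as above.
def _previewpatchexample(myui, repo):
    for f in sorted(changedfiles(myui, repo, 'fix.patch')):
        myui.write('%s\n' % f)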
2205 class GitDiffRequired(Exception):
2205 class GitDiffRequired(Exception):
2206 pass
2206 pass
2207
2207
2208 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2208 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2209 '''return diffopts with all features supported and parsed'''
2209 '''return diffopts with all features supported and parsed'''
2210 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2210 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2211 git=True, whitespace=True, formatchanging=True)
2211 git=True, whitespace=True, formatchanging=True)
2212
2212
2213 diffopts = diffallopts
2213 diffopts = diffallopts
2214
2214
2215 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2215 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2216 whitespace=False, formatchanging=False):
2216 whitespace=False, formatchanging=False):
2217 '''return diffopts with only opted-in features parsed
2217 '''return diffopts with only opted-in features parsed
2218
2218
2219 Features:
2219 Features:
2220 - git: git-style diffs
2220 - git: git-style diffs
2221 - whitespace: whitespace options like ignoreblanklines and ignorews
2221 - whitespace: whitespace options like ignoreblanklines and ignorews
2222 - formatchanging: options that will likely break or cause correctness issues
2222 - formatchanging: options that will likely break or cause correctness issues
2223 with most diff parsers
2223 with most diff parsers
2224 '''
2224 '''
2225 def get(key, name=None, getter=ui.configbool, forceplain=None):
2225 def get(key, name=None, getter=ui.configbool, forceplain=None):
2226 if opts:
2226 if opts:
2227 v = opts.get(key)
2227 v = opts.get(key)
2228 # diffopts flags are either None-default (which is passed
2228 # diffopts flags are either None-default (which is passed
2229 # through unchanged, so we can identify unset values), or
2229 # through unchanged, so we can identify unset values), or
2230 # some other falsey default (eg --unified, which defaults
2230 # some other falsey default (eg --unified, which defaults
2231 # to an empty string). We only want to override the config
2231 # to an empty string). We only want to override the config
2232 # entries from hgrc with command line values if they
2232 # entries from hgrc with command line values if they
2233 # appear to have been set, which is any truthy value,
2233 # appear to have been set, which is any truthy value,
2234 # True, or False.
2234 # True, or False.
2235 if v or isinstance(v, bool):
2235 if v or isinstance(v, bool):
2236 return v
2236 return v
2237 if forceplain is not None and ui.plain():
2237 if forceplain is not None and ui.plain():
2238 return forceplain
2238 return forceplain
2239 return getter(section, name or key, None, untrusted=untrusted)
2239 return getter(section, name or key, None, untrusted=untrusted)
2240
2240
2241 # core options, expected to be understood by every diff parser
2241 # core options, expected to be understood by every diff parser
2242 buildopts = {
2242 buildopts = {
2243 'nodates': get('nodates'),
2243 'nodates': get('nodates'),
2244 'showfunc': get('show_function', 'showfunc'),
2244 'showfunc': get('show_function', 'showfunc'),
2245 'context': get('unified', getter=ui.config),
2245 'context': get('unified', getter=ui.config),
2246 }
2246 }
2247
2247
2248 if git:
2248 if git:
2249 buildopts['git'] = get('git')
2249 buildopts['git'] = get('git')
2250
2250
2251 # since this is in the experimental section, we need to call
2251 # since this is in the experimental section, we need to call
2252 # ui.configbool directly
2252 # ui.configbool directly
2253 buildopts['showsimilarity'] = ui.configbool('experimental',
2253 buildopts['showsimilarity'] = ui.configbool('experimental',
2254 'extendedheader.similarity')
2254 'extendedheader.similarity')
2255
2255
2256 # need to inspect the ui object instead of using get() since we want to
2256 # need to inspect the ui object instead of using get() since we want to
2257 # test for an int
2257 # test for an int
2258 hconf = ui.config('experimental', 'extendedheader.index')
2258 hconf = ui.config('experimental', 'extendedheader.index')
2259 if hconf is not None:
2259 if hconf is not None:
2260 hlen = None
2260 hlen = None
2261 try:
2261 try:
2262 # the hash config could be an integer (for length of hash) or a
2262 # the hash config could be an integer (for length of hash) or a
2263 # word (e.g. short, full, none)
2263 # word (e.g. short, full, none)
2264 hlen = int(hconf)
2264 hlen = int(hconf)
2265 if hlen < 0 or hlen > 40:
2265 if hlen < 0 or hlen > 40:
2266 msg = _("invalid length for extendedheader.index: '%d'\n")
2266 msg = _("invalid length for extendedheader.index: '%d'\n")
2267 ui.warn(msg % hlen)
2267 ui.warn(msg % hlen)
2268 except ValueError:
2268 except ValueError:
2269 # default value
2269 # default value
2270 if hconf == 'short' or hconf == '':
2270 if hconf == 'short' or hconf == '':
2271 hlen = 12
2271 hlen = 12
2272 elif hconf == 'full':
2272 elif hconf == 'full':
2273 hlen = 40
2273 hlen = 40
2274 elif hconf != 'none':
2274 elif hconf != 'none':
2275 msg = _("invalid value for extendedheader.index: '%s'\n")
2275 msg = _("invalid value for extendedheader.index: '%s'\n")
2276 ui.warn(msg % hconf)
2276 ui.warn(msg % hconf)
2277 finally:
2277 finally:
2278 buildopts['index'] = hlen
2278 buildopts['index'] = hlen
2279
2279
2280 if whitespace:
2280 if whitespace:
2281 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2281 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2282 buildopts['ignorewsamount'] = get('ignore_space_change',
2282 buildopts['ignorewsamount'] = get('ignore_space_change',
2283 'ignorewsamount')
2283 'ignorewsamount')
2284 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2284 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2285 'ignoreblanklines')
2285 'ignoreblanklines')
2286 buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
2286 buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
2287 if formatchanging:
2287 if formatchanging:
2288 buildopts['text'] = opts and opts.get('text')
2288 buildopts['text'] = opts and opts.get('text')
2289 binary = None if opts is None else opts.get('binary')
2289 binary = None if opts is None else opts.get('binary')
2290 buildopts['nobinary'] = (not binary if binary is not None
2290 buildopts['nobinary'] = (not binary if binary is not None
2291 else get('nobinary', forceplain=False))
2291 else get('nobinary', forceplain=False))
2292 buildopts['noprefix'] = get('noprefix', forceplain=False)
2292 buildopts['noprefix'] = get('noprefix', forceplain=False)
2293
2293
2294 return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2294 return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2295
2295
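# Illustrative sketch (not from the sources): a caller that only cares about
# git-style diffs opts in to that single feature group, leaving whitespace
# and format-changing options at their configured defaults.
def _diffoptsexample(myui):
    opts = difffeatureopts(myui, git=True)
    return opts.git  # reflects the command line or the [diff] section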
2296 def diff(repo, node1=None, node2=None, match=None, changes=None,
2296 def diff(repo, node1=None, node2=None, match=None, changes=None,
2297 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2297 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2298 '''yields diff of changes to files between two nodes, or node and
2298 '''yields diff of changes to files between two nodes, or node and
2299 working directory.
2299 working directory.
2300
2300
2301 if node1 is None, use first dirstate parent instead.
2301 if node1 is None, use first dirstate parent instead.
2302 if node2 is None, compare node1 with working directory.
2302 if node2 is None, compare node1 with working directory.
2303
2303
2304 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2304 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2305 every time some change cannot be represented with the current
2305 every time some change cannot be represented with the current
2306 patch format. Return False to upgrade to git patch format, True to
2306 patch format. Return False to upgrade to git patch format, True to
2307 accept the loss or raise an exception to abort the diff. It is
2307 accept the loss or raise an exception to abort the diff. It is
2308 called with the name of current file being diffed as 'fn'. If set
2308 called with the name of current file being diffed as 'fn'. If set
2309 to None, patches will always be upgraded to git format when
2309 to None, patches will always be upgraded to git format when
2310 necessary.
2310 necessary.
2311
2311
2312 prefix is a filename prefix that is prepended to all filenames on
2312 prefix is a filename prefix that is prepended to all filenames on
2313 display (used for subrepos).
2313 display (used for subrepos).
2314
2314
2315 relroot, if not empty, must be normalized with a trailing /. Any match
2315 relroot, if not empty, must be normalized with a trailing /. Any match
2316 patterns that fall outside it will be ignored.
2316 patterns that fall outside it will be ignored.
2317
2317
2318 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2318 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2319 information.'''
2319 information.'''
2320 for header, hunks in diffhunks(repo, node1=node1, node2=node2, match=match,
2320 for header, hunks in diffhunks(repo, node1=node1, node2=node2, match=match,
2321 changes=changes, opts=opts,
2321 changes=changes, opts=opts,
2322 losedatafn=losedatafn, prefix=prefix,
2322 losedatafn=losedatafn, prefix=prefix,
2323 relroot=relroot, copy=copy):
2323 relroot=relroot, copy=copy):
2324 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2324 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2325 if header and (text or len(header) > 1):
2325 if header and (text or len(header) > 1):
2326 yield '\n'.join(header) + '\n'
2326 yield '\n'.join(header) + '\n'
2327 if text:
2327 if text:
2328 yield text
2328 yield text
2329
2329
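# Illustrative sketch (not from the sources): diff() yields ready-to-print
# text blocks (a header block and the hunk text per file), so emitting a
# working-directory diff is a simple loop.  `myui` and `repo` are assumed to
# come from the usual mercurial entry points.
def _writediffexample(myui, repo):
    for chunk in diff(repo, opts=diffallopts(myui)):
        myui.write(chunk)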
2330 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2330 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2331 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2331 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2332 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2332 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2333 where `header` is a list of diff headers and `hunks` is an iterable of
2333 where `header` is a list of diff headers and `hunks` is an iterable of
2334 (`hunkrange`, `hunklines`) tuples.
2334 (`hunkrange`, `hunklines`) tuples.
2335
2335
2336 See diff() for the meaning of parameters.
2336 See diff() for the meaning of parameters.
2337 """
2337 """
2338
2338
2339 if opts is None:
2339 if opts is None:
2340 opts = mdiff.defaultopts
2340 opts = mdiff.defaultopts
2341
2341
2342 if not node1 and not node2:
2342 if not node1 and not node2:
2343 node1 = repo.dirstate.p1()
2343 node1 = repo.dirstate.p1()
2344
2344
2345 def lrugetfilectx():
2345 def lrugetfilectx():
2346 cache = {}
2346 cache = {}
2347 order = collections.deque()
2347 order = collections.deque()
2348 def getfilectx(f, ctx):
2348 def getfilectx(f, ctx):
2349 fctx = ctx.filectx(f, filelog=cache.get(f))
2349 fctx = ctx.filectx(f, filelog=cache.get(f))
2350 if f not in cache:
2350 if f not in cache:
2351 if len(cache) > 20:
2351 if len(cache) > 20:
2352 del cache[order.popleft()]
2352 del cache[order.popleft()]
2353 cache[f] = fctx.filelog()
2353 cache[f] = fctx.filelog()
2354 else:
2354 else:
2355 order.remove(f)
2355 order.remove(f)
2356 order.append(f)
2356 order.append(f)
2357 return fctx
2357 return fctx
2358 return getfilectx
2358 return getfilectx
2359 getfilectx = lrugetfilectx()
2359 getfilectx = lrugetfilectx()
2360
2360
2361 ctx1 = repo[node1]
2361 ctx1 = repo[node1]
2362 ctx2 = repo[node2]
2362 ctx2 = repo[node2]
2363
2363
2364 relfiltered = False
2364 relfiltered = False
2365 if relroot != '' and match.always():
2365 if relroot != '' and match.always():
2366 # as a special case, create a new matcher with just the relroot
2366 # as a special case, create a new matcher with just the relroot
2367 pats = [relroot]
2367 pats = [relroot]
2368 match = scmutil.match(ctx2, pats, default='path')
2368 match = scmutil.match(ctx2, pats, default='path')
2369 relfiltered = True
2369 relfiltered = True
2370
2370
2371 if not changes:
2371 if not changes:
2372 changes = repo.status(ctx1, ctx2, match=match)
2372 changes = repo.status(ctx1, ctx2, match=match)
2373 modified, added, removed = changes[:3]
2373 modified, added, removed = changes[:3]
2374
2374
2375 if not modified and not added and not removed:
2375 if not modified and not added and not removed:
2376 return []
2376 return []
2377
2377
2378 if repo.ui.debugflag:
2378 if repo.ui.debugflag:
2379 hexfunc = hex
2379 hexfunc = hex
2380 else:
2380 else:
2381 hexfunc = short
2381 hexfunc = short
2382 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2382 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2383
2383
2384 if copy is None:
2384 if copy is None:
2385 copy = {}
2385 copy = {}
2386 if opts.git or opts.upgrade:
2386 if opts.git or opts.upgrade:
2387 copy = copies.pathcopies(ctx1, ctx2, match=match)
2387 copy = copies.pathcopies(ctx1, ctx2, match=match)
2388
2388
2389 if relroot is not None:
2389 if relroot is not None:
2390 if not relfiltered:
2390 if not relfiltered:
2391 # XXX this would ideally be done in the matcher, but that is
2391 # XXX this would ideally be done in the matcher, but that is
2392 # generally meant to 'or' patterns, not 'and' them. In this case we
2392 # generally meant to 'or' patterns, not 'and' them. In this case we
2393 # need to 'and' all the patterns from the matcher with relroot.
2393 # need to 'and' all the patterns from the matcher with relroot.
2394 def filterrel(l):
2394 def filterrel(l):
2395 return [f for f in l if f.startswith(relroot)]
2395 return [f for f in l if f.startswith(relroot)]
2396 modified = filterrel(modified)
2396 modified = filterrel(modified)
2397 added = filterrel(added)
2397 added = filterrel(added)
2398 removed = filterrel(removed)
2398 removed = filterrel(removed)
2399 relfiltered = True
2399 relfiltered = True
2400 # filter out copies where either side isn't inside the relative root
2400 # filter out copies where either side isn't inside the relative root
2401 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2401 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2402 if dst.startswith(relroot)
2402 if dst.startswith(relroot)
2403 and src.startswith(relroot)))
2403 and src.startswith(relroot)))
2404
2404
2405 modifiedset = set(modified)
2405 modifiedset = set(modified)
2406 addedset = set(added)
2406 addedset = set(added)
2407 removedset = set(removed)
2407 removedset = set(removed)
2408 for f in modified:
2408 for f in modified:
2409 if f not in ctx1:
2409 if f not in ctx1:
2410 # Fix up added, since merged-in additions appear as
2410 # Fix up added, since merged-in additions appear as
2411 # modifications during merges
2411 # modifications during merges
2412 modifiedset.remove(f)
2412 modifiedset.remove(f)
2413 addedset.add(f)
2413 addedset.add(f)
2414 for f in removed:
2414 for f in removed:
2415 if f not in ctx1:
2415 if f not in ctx1:
2416 # Merged-in additions that are then removed are reported as removed.
2416 # Merged-in additions that are then removed are reported as removed.
2417 # They are not in ctx1, so we don't want to show them in the diff.
2417 # They are not in ctx1, so we don't want to show them in the diff.
2418 removedset.remove(f)
2418 removedset.remove(f)
2419 modified = sorted(modifiedset)
2419 modified = sorted(modifiedset)
2420 added = sorted(addedset)
2420 added = sorted(addedset)
2421 removed = sorted(removedset)
2421 removed = sorted(removedset)
2422 for dst, src in copy.items():
2422 for dst, src in copy.items():
2423 if src not in ctx1:
2423 if src not in ctx1:
2424 # Files merged in during a merge and then copied/renamed are
2424 # Files merged in during a merge and then copied/renamed are
2425 # reported as copies. We want to show them in the diff as additions.
2425 # reported as copies. We want to show them in the diff as additions.
2426 del copy[dst]
2426 del copy[dst]
2427
2427
2428 def difffn(opts, losedata):
2428 def difffn(opts, losedata):
2429 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2429 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2430 copy, getfilectx, opts, losedata, prefix, relroot)
2430 copy, getfilectx, opts, losedata, prefix, relroot)
2431 if opts.upgrade and not opts.git:
2431 if opts.upgrade and not opts.git:
2432 try:
2432 try:
2433 def losedata(fn):
2433 def losedata(fn):
2434 if not losedatafn or not losedatafn(fn=fn):
2434 if not losedatafn or not losedatafn(fn=fn):
2435 raise GitDiffRequired
2435 raise GitDiffRequired
2436 # Buffer the whole output until we are sure it can be generated
2436 # Buffer the whole output until we are sure it can be generated
2437 return list(difffn(opts.copy(git=False), losedata))
2437 return list(difffn(opts.copy(git=False), losedata))
2438 except GitDiffRequired:
2438 except GitDiffRequired:
2439 return difffn(opts.copy(git=True), None)
2439 return difffn(opts.copy(git=True), None)
2440 else:
2440 else:
2441 return difffn(opts, None)
2441 return difffn(opts, None)
2442
2442
2443 def difflabel(func, *args, **kw):
2443 def difflabel(func, *args, **kw):
2444 '''yields 2-tuples of (output, label) based on the output of func()'''
2444 '''yields 2-tuples of (output, label) based on the output of func()'''
2445 headprefixes = [('diff', 'diff.diffline'),
2445 headprefixes = [('diff', 'diff.diffline'),
2446 ('copy', 'diff.extended'),
2446 ('copy', 'diff.extended'),
2447 ('rename', 'diff.extended'),
2447 ('rename', 'diff.extended'),
2448 ('old', 'diff.extended'),
2448 ('old', 'diff.extended'),
2449 ('new', 'diff.extended'),
2449 ('new', 'diff.extended'),
2450 ('deleted', 'diff.extended'),
2450 ('deleted', 'diff.extended'),
2451 ('index', 'diff.extended'),
2451 ('index', 'diff.extended'),
2452 ('similarity', 'diff.extended'),
2452 ('similarity', 'diff.extended'),
2453 ('---', 'diff.file_a'),
2453 ('---', 'diff.file_a'),
2454 ('+++', 'diff.file_b')]
2454 ('+++', 'diff.file_b')]
2455 textprefixes = [('@', 'diff.hunk'),
2455 textprefixes = [('@', 'diff.hunk'),
2456 ('-', 'diff.deleted'),
2456 ('-', 'diff.deleted'),
2457 ('+', 'diff.inserted')]
2457 ('+', 'diff.inserted')]
2458 head = False
2458 head = False
2459 for chunk in func(*args, **kw):
2459 for chunk in func(*args, **kw):
2460 lines = chunk.split('\n')
2460 lines = chunk.split('\n')
2461 for i, line in enumerate(lines):
2461 for i, line in enumerate(lines):
2462 if i != 0:
2462 if i != 0:
2463 yield ('\n', '')
2463 yield ('\n', '')
2464 if head:
2464 if head:
2465 if line.startswith('@'):
2465 if line.startswith('@'):
2466 head = False
2466 head = False
2467 else:
2467 else:
2468 if line and line[0] not in ' +-@\\':
2468 if line and line[0] not in ' +-@\\':
2469 head = True
2469 head = True
2470 stripline = line
2470 stripline = line
2471 diffline = False
2471 diffline = False
2472 if not head and line and line[0] in '+-':
2472 if not head and line and line[0] in '+-':
2473 # highlight tabs and trailing whitespace, but only in
2473 # highlight tabs and trailing whitespace, but only in
2474 # changed lines
2474 # changed lines
2475 stripline = line.rstrip()
2475 stripline = line.rstrip()
2476 diffline = True
2476 diffline = True
2477
2477
2478 prefixes = textprefixes
2478 prefixes = textprefixes
2479 if head:
2479 if head:
2480 prefixes = headprefixes
2480 prefixes = headprefixes
2481 for prefix, label in prefixes:
2481 for prefix, label in prefixes:
2482 if stripline.startswith(prefix):
2482 if stripline.startswith(prefix):
2483 if diffline:
2483 if diffline:
2484 for token in tabsplitter.findall(stripline):
2484 for token in tabsplitter.findall(stripline):
2485 if '\t' == token[0]:
2485 if '\t' == token[0]:
2486 yield (token, 'diff.tab')
2486 yield (token, 'diff.tab')
2487 else:
2487 else:
2488 yield (token, label)
2488 yield (token, label)
2489 else:
2489 else:
2490 yield (stripline, label)
2490 yield (stripline, label)
2491 break
2491 break
2492 else:
2492 else:
2493 yield (line, '')
2493 yield (line, '')
2494 if line != stripline:
2494 if line != stripline:
2495 yield (line[len(stripline):], 'diff.trailingwhitespace')
2495 yield (line[len(stripline):], 'diff.trailingwhitespace')
2496
2496
2497 def diffui(*args, **kw):
2497 def diffui(*args, **kw):
2498 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2498 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2499 return difflabel(diff, *args, **kw)
2499 return difflabel(diff, *args, **kw)
2500
2500
2501 def _filepairs(modified, added, removed, copy, opts):
2501 def _filepairs(modified, added, removed, copy, opts):
2502 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2502 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2503 before and f2 is the name after. For added files, f1 will be None,
2503 before and f2 is the name after. For added files, f1 will be None,
2504 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2504 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2505 or 'rename' (the latter two only if opts.git is set).'''
2505 or 'rename' (the latter two only if opts.git is set).'''
2506 gone = set()
2506 gone = set()
2507
2507
2508 copyto = dict([(v, k) for k, v in copy.items()])
2508 copyto = dict([(v, k) for k, v in copy.items()])
2509
2509
2510 addedset, removedset = set(added), set(removed)
2510 addedset, removedset = set(added), set(removed)
2511
2511
2512 for f in sorted(modified + added + removed):
2512 for f in sorted(modified + added + removed):
2513 copyop = None
2513 copyop = None
2514 f1, f2 = f, f
2514 f1, f2 = f, f
2515 if f in addedset:
2515 if f in addedset:
2516 f1 = None
2516 f1 = None
2517 if f in copy:
2517 if f in copy:
2518 if opts.git:
2518 if opts.git:
2519 f1 = copy[f]
2519 f1 = copy[f]
2520 if f1 in removedset and f1 not in gone:
2520 if f1 in removedset and f1 not in gone:
2521 copyop = 'rename'
2521 copyop = 'rename'
2522 gone.add(f1)
2522 gone.add(f1)
2523 else:
2523 else:
2524 copyop = 'copy'
2524 copyop = 'copy'
2525 elif f in removedset:
2525 elif f in removedset:
2526 f2 = None
2526 f2 = None
2527 if opts.git:
2527 if opts.git:
2528 # have we already reported a copy above?
2528 # have we already reported a copy above?
2529 if (f in copyto and copyto[f] in addedset
2529 if (f in copyto and copyto[f] in addedset
2530 and copy[copyto[f]] == f):
2530 and copy[copyto[f]] == f):
2531 continue
2531 continue
2532 yield f1, f2, copyop
2532 yield f1, f2, copyop
2533
2533
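# Worked example (hypothetical inputs, not part of patch.py): with git diffs
# enabled, a path that leaves `removed` and reappears in `added` through the
# copy map is reported once, as a rename pair, and its removal is skipped.
def _filepairsexample():
    class fakeopts(object):
        git = True
    pairs = list(_filepairs([], ['b'], ['a'], {'b': 'a'}, fakeopts()))
    assert pairs == [('a', 'b', 'rename')]
    return pairs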
2534 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2534 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2535 copy, getfilectx, opts, losedatafn, prefix, relroot):
2535 copy, getfilectx, opts, losedatafn, prefix, relroot):
2536 '''given input data, generate a diff and yield it in blocks
2536 '''given input data, generate a diff and yield it in blocks
2537
2537
2538 If generating a diff would lose data like flags or binary data and
2538 If generating a diff would lose data like flags or binary data and
2539 losedatafn is not None, it will be called.
2539 losedatafn is not None, it will be called.
2540
2540
2541 relroot is removed and prefix is added to every path in the diff output.
2541 relroot is removed and prefix is added to every path in the diff output.
2542
2542
2543 If relroot is not empty, this function expects every path in modified,
2543 If relroot is not empty, this function expects every path in modified,
2544 added, removed and copy to start with it.'''
2544 added, removed and copy to start with it.'''
2545
2545
2546 def gitindex(text):
2546 def gitindex(text):
2547 if not text:
2547 if not text:
2548 text = ""
2548 text = ""
2549 l = len(text)
2549 l = len(text)
2550 s = hashlib.sha1('blob %d\0' % l)
2550 s = hashlib.sha1('blob %d\0' % l)
2551 s.update(text)
2551 s.update(text)
2552 return s.hexdigest()
2552 return s.hexdigest()
2553
2553
2554 if opts.noprefix:
2554 if opts.noprefix:
2555 aprefix = bprefix = ''
2555 aprefix = bprefix = ''
2556 else:
2556 else:
2557 aprefix = 'a/'
2557 aprefix = 'a/'
2558 bprefix = 'b/'
2558 bprefix = 'b/'
2559
2559
2560 def diffline(f, revs):
2560 def diffline(f, revs):
2561 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2561 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2562 return 'diff %s %s' % (revinfo, f)
2562 return 'diff %s %s' % (revinfo, f)
2563
2563
2564 def isempty(fctx):
2564 def isempty(fctx):
2565 return fctx is None or fctx.size() == 0
2565 return fctx is None or fctx.size() == 0
2566
2566
2567 date1 = util.datestr(ctx1.date())
2567 date1 = util.datestr(ctx1.date())
2568 date2 = util.datestr(ctx2.date())
2568 date2 = util.datestr(ctx2.date())
2569
2569
2570 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2570 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2571
2571
2572 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2572 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2573 or repo.ui.configbool('devel', 'check-relroot')):
2573 or repo.ui.configbool('devel', 'check-relroot')):
2574 for f in modified + added + removed + list(copy) + list(copy.values()):
2574 for f in modified + added + removed + list(copy) + list(copy.values()):
2575 if f is not None and not f.startswith(relroot):
2575 if f is not None and not f.startswith(relroot):
2576 raise AssertionError(
2576 raise AssertionError(
2577 "file %s doesn't start with relroot %s" % (f, relroot))
2577 "file %s doesn't start with relroot %s" % (f, relroot))
2578
2578
2579 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2579 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2580 content1 = None
2580 content1 = None
2581 content2 = None
2581 content2 = None
2582 fctx1 = None
2582 fctx1 = None
2583 fctx2 = None
2583 fctx2 = None
2584 flag1 = None
2584 flag1 = None
2585 flag2 = None
2585 flag2 = None
2586 if f1:
2586 if f1:
2587 fctx1 = getfilectx(f1, ctx1)
2587 fctx1 = getfilectx(f1, ctx1)
2588 if opts.git or losedatafn:
2588 if opts.git or losedatafn:
2589 flag1 = ctx1.flags(f1)
2589 flag1 = ctx1.flags(f1)
2590 if f2:
2590 if f2:
2591 fctx2 = getfilectx(f2, ctx2)
2591 fctx2 = getfilectx(f2, ctx2)
2592 if opts.git or losedatafn:
2592 if opts.git or losedatafn:
2593 flag2 = ctx2.flags(f2)
2593 flag2 = ctx2.flags(f2)
2594 # if binary is True, output "summary" or "base85", but not "text diff"
2594 # if binary is True, output "summary" or "base85", but not "text diff"
2595 binary = not opts.text and any(f.isbinary()
2595 binary = not opts.text and any(f.isbinary()
2596 for f in [fctx1, fctx2] if f is not None)
2596 for f in [fctx1, fctx2] if f is not None)
2597
2597
2598 if losedatafn and not opts.git:
2598 if losedatafn and not opts.git:
2599 if (binary or
2599 if (binary or
2600 # copy/rename
2600 # copy/rename
2601 f2 in copy or
2601 f2 in copy or
2602 # empty file creation
2602 # empty file creation
2603 (not f1 and isempty(fctx2)) or
2603 (not f1 and isempty(fctx2)) or
2604 # empty file deletion
2604 # empty file deletion
2605 (isempty(fctx1) and not f2) or
2605 (isempty(fctx1) and not f2) or
2606 # create with flags
2606 # create with flags
2607 (not f1 and flag2) or
2607 (not f1 and flag2) or
2608 # change flags
2608 # change flags
2609 (f1 and f2 and flag1 != flag2)):
2609 (f1 and f2 and flag1 != flag2)):
2610 losedatafn(f2 or f1)
2610 losedatafn(f2 or f1)
2611
2611
2612 path1 = f1 or f2
2612 path1 = f1 or f2
2613 path2 = f2 or f1
2613 path2 = f2 or f1
2614 path1 = posixpath.join(prefix, path1[len(relroot):])
2614 path1 = posixpath.join(prefix, path1[len(relroot):])
2615 path2 = posixpath.join(prefix, path2[len(relroot):])
2615 path2 = posixpath.join(prefix, path2[len(relroot):])
2616 header = []
2616 header = []
2617 if opts.git:
2617 if opts.git:
2618 header.append('diff --git %s%s %s%s' %
2618 header.append('diff --git %s%s %s%s' %
2619 (aprefix, path1, bprefix, path2))
2619 (aprefix, path1, bprefix, path2))
2620 if not f1: # added
2620 if not f1: # added
2621 header.append('new file mode %s' % gitmode[flag2])
2621 header.append('new file mode %s' % gitmode[flag2])
2622 elif not f2: # removed
2622 elif not f2: # removed
2623 header.append('deleted file mode %s' % gitmode[flag1])
2623 header.append('deleted file mode %s' % gitmode[flag1])
2624 else: # modified/copied/renamed
2624 else: # modified/copied/renamed
2625 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2625 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2626 if mode1 != mode2:
2626 if mode1 != mode2:
2627 header.append('old mode %s' % mode1)
2627 header.append('old mode %s' % mode1)
2628 header.append('new mode %s' % mode2)
2628 header.append('new mode %s' % mode2)
2629 if copyop is not None:
2629 if copyop is not None:
2630 if opts.showsimilarity:
2630 if opts.showsimilarity:
2631 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2631 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2632 header.append('similarity index %d%%' % sim)
2632 header.append('similarity index %d%%' % sim)
2633 header.append('%s from %s' % (copyop, path1))
2633 header.append('%s from %s' % (copyop, path1))
2634 header.append('%s to %s' % (copyop, path2))
2634 header.append('%s to %s' % (copyop, path2))
2635 elif revs and not repo.ui.quiet:
2635 elif revs and not repo.ui.quiet:
2636 header.append(diffline(path1, revs))
2636 header.append(diffline(path1, revs))
2637
2637
2638 # fctx.is | diffopts | what to | is fctx.data()
2638 # fctx.is | diffopts | what to | is fctx.data()
2639 # binary() | text nobinary git index | output? | outputted?
2639 # binary() | text nobinary git index | output? | outputted?
2640 # ------------------------------------|----------------------------
2640 # ------------------------------------|----------------------------
2641 # yes | no no no * | summary | no
2641 # yes | no no no * | summary | no
2642 # yes | no no yes * | base85 | yes
2642 # yes | no no yes * | base85 | yes
2643 # yes | no yes no * | summary | no
2643 # yes | no yes no * | summary | no
2644 # yes | no yes yes 0 | summary | no
2644 # yes | no yes yes 0 | summary | no
2645 # yes | no yes yes >0 | summary | semi [1]
2645 # yes | no yes yes >0 | summary | semi [1]
2646 # yes | yes * * * | text diff | yes
2646 # yes | yes * * * | text diff | yes
2647 # no | * * * * | text diff | yes
2647 # no | * * * * | text diff | yes
2648 # [1]: hash(fctx.data()) is output, so fctx.data() cannot be faked
2648 # [1]: hash(fctx.data()) is output, so fctx.data() cannot be faked
2649 if binary and (not opts.git or (opts.git and opts.nobinary and not
2649 if binary and (not opts.git or (opts.git and opts.nobinary and not
2650 opts.index)):
2650 opts.index)):
2651 # fast path: no binary content will be displayed, content1 and
2651 # fast path: no binary content will be displayed, content1 and
2652 # content2 are only used for equivalent test. cmp() could have a
2652 # content2 are only used for equivalent test. cmp() could have a
2653 # fast path.
2653 # fast path.
2654 if fctx1 is not None:
2654 if fctx1 is not None:
2655 content1 = b'\0'
2655 content1 = b'\0'
2656 if fctx2 is not None:
2656 if fctx2 is not None:
2657 if fctx1 is not None and not fctx1.cmp(fctx2):
2657 if fctx1 is not None and not fctx1.cmp(fctx2):
2658 content2 = b'\0' # not different
2658 content2 = b'\0' # not different
2659 else:
2659 else:
2660 content2 = b'\0\0'
2660 content2 = b'\0\0'
2661 else:
2661 else:
2662 # normal path: load contents
2662 # normal path: load contents
2663 if fctx1 is not None:
2663 if fctx1 is not None:
2664 content1 = fctx1.data()
2664 content1 = fctx1.data()
2665 if fctx2 is not None:
2665 if fctx2 is not None:
2666 content2 = fctx2.data()
2666 content2 = fctx2.data()
2667
2667
2668 if binary and opts.git and not opts.nobinary:
2668 if binary and opts.git and not opts.nobinary:
2669 text = mdiff.b85diff(content1, content2)
2669 text = mdiff.b85diff(content1, content2)
2670 if text:
2670 if text:
2671 header.append('index %s..%s' %
2671 header.append('index %s..%s' %
2672 (gitindex(content1), gitindex(content2)))
2672 (gitindex(content1), gitindex(content2)))
2673 hunks = (None, [text]),
2673 hunks = (None, [text]),
2674 else:
2674 else:
2675 if opts.git and opts.index > 0:
2675 if opts.git and opts.index > 0:
2676 flag = flag1
2676 flag = flag1
2677 if flag is None:
2677 if flag is None:
2678 flag = flag2
2678 flag = flag2
2679 header.append('index %s..%s %s' %
2679 header.append('index %s..%s %s' %
2680 (gitindex(content1)[0:opts.index],
2680 (gitindex(content1)[0:opts.index],
2681 gitindex(content2)[0:opts.index],
2681 gitindex(content2)[0:opts.index],
2682 gitmode[flag]))
2682 gitmode[flag]))
2683
2683
2684 uheaders, hunks = mdiff.unidiff(content1, date1,
2684 uheaders, hunks = mdiff.unidiff(content1, date1,
2685 content2, date2,
2685 content2, date2,
2686 path1, path2, opts=opts)
2686 path1, path2, opts=opts)
2687 header.extend(uheaders)
2687 header.extend(uheaders)
2688 yield header, hunks
2688 yield header, hunks
2689
2689
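# Illustrative sketch (not from the sources): the "index" header lines built
# by trydiff() use git's blob hashing scheme, SHA-1 over 'blob <len>\0' plus
# the content (py2-style str handling, matching gitindex() above).  Empty
# content therefore hashes to git's well-known empty-blob id.
def _gitblobidexample(data=''):
    s = hashlib.sha1('blob %d\0' % len(data))
    s.update(data)
    return s.hexdigest()

# _gitblobidexample('') == 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391'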
2690 def diffstatsum(stats):
2690 def diffstatsum(stats):
2691 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2691 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2692 for f, a, r, b in stats:
2692 for f, a, r, b in stats:
2693 maxfile = max(maxfile, encoding.colwidth(f))
2693 maxfile = max(maxfile, encoding.colwidth(f))
2694 maxtotal = max(maxtotal, a + r)
2694 maxtotal = max(maxtotal, a + r)
2695 addtotal += a
2695 addtotal += a
2696 removetotal += r
2696 removetotal += r
2697 binary = binary or b
2697 binary = binary or b
2698
2698
2699 return maxfile, maxtotal, addtotal, removetotal, binary
2699 return maxfile, maxtotal, addtotal, removetotal, binary
2700
2700
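# Worked example (hypothetical stats, not part of patch.py): diffstatsum()
# folds per-file (name, adds, removes, isbinary) tuples into the column
# widths and totals that diffstat() below uses for layout.
def _diffstatsumexample():
    stats = [('a.txt', 2, 1, False), ('img.png', 0, 0, True)]
    # -> (7, 3, 2, 1, True): widest name, widest add+remove total,
    #    total adds, total removes, and whether any file was binary
    return diffstatsum(stats)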
2701 def diffstatdata(lines):
2701 def diffstatdata(lines):
2702 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2702 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2703
2703
2704 results = []
2704 results = []
2705 filename, adds, removes, isbinary = None, 0, 0, False
2705 filename, adds, removes, isbinary = None, 0, 0, False
2706
2706
2707 def addresult():
2707 def addresult():
2708 if filename:
2708 if filename:
2709 results.append((filename, adds, removes, isbinary))
2709 results.append((filename, adds, removes, isbinary))
2710
2710
2711 # inheader is used to track if a line is in the
2711 # inheader is used to track if a line is in the
2712 # header portion of the diff. This helps properly account
2712 # header portion of the diff. This helps properly account
2713 # for lines that start with '--' or '++'
2713 # for lines that start with '--' or '++'
2714 inheader = False
2714 inheader = False
2715
2715
2716 for line in lines:
2716 for line in lines:
2717 if line.startswith('diff'):
2717 if line.startswith('diff'):
2718 addresult()
2718 addresult()
2719 # starting a new file diff
2719 # starting a new file diff
2720 # set numbers to 0 and reset inheader
2720 # set numbers to 0 and reset inheader
2721 inheader = True
2721 inheader = True
2722 adds, removes, isbinary = 0, 0, False
2722 adds, removes, isbinary = 0, 0, False
2723 if line.startswith('diff --git a/'):
2723 if line.startswith('diff --git a/'):
2724 filename = gitre.search(line).group(2)
2724 filename = gitre.search(line).group(2)
2725 elif line.startswith('diff -r'):
2725 elif line.startswith('diff -r'):
2726 # format: "diff -r ... -r ... filename"
2726 # format: "diff -r ... -r ... filename"
2727 filename = diffre.search(line).group(1)
2727 filename = diffre.search(line).group(1)
2728 elif line.startswith('@@'):
2728 elif line.startswith('@@'):
2729 inheader = False
2729 inheader = False
2730 elif line.startswith('+') and not inheader:
2730 elif line.startswith('+') and not inheader:
2731 adds += 1
2731 adds += 1
2732 elif line.startswith('-') and not inheader:
2732 elif line.startswith('-') and not inheader:
2733 removes += 1
2733 removes += 1
2734 elif (line.startswith('GIT binary patch') or
2734 elif (line.startswith('GIT binary patch') or
2735 line.startswith('Binary file')):
2735 line.startswith('Binary file')):
2736 isbinary = True
2736 isbinary = True
2737 addresult()
2737 addresult()
2738 return results
2738 return results
2739
2739
2740 def diffstat(lines, width=80):
2740 def diffstat(lines, width=80):
2741 output = []
2741 output = []
2742 stats = diffstatdata(lines)
2742 stats = diffstatdata(lines)
2743 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2743 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2744
2744
2745 countwidth = len(str(maxtotal))
2745 countwidth = len(str(maxtotal))
2746 if hasbinary and countwidth < 3:
2746 if hasbinary and countwidth < 3:
2747 countwidth = 3
2747 countwidth = 3
2748 graphwidth = width - countwidth - maxname - 6
2748 graphwidth = width - countwidth - maxname - 6
2749 if graphwidth < 10:
2749 if graphwidth < 10:
2750 graphwidth = 10
2750 graphwidth = 10
2751
2751
2752 def scale(i):
2752 def scale(i):
2753 if maxtotal <= graphwidth:
2753 if maxtotal <= graphwidth:
2754 return i
2754 return i
2755 # If diffstat runs out of room it doesn't print anything,
2755 # If diffstat runs out of room it doesn't print anything,
2756 # which isn't very useful, so always print at least one + or -
2756 # which isn't very useful, so always print at least one + or -
2757 # if there were at least some changes.
2757 # if there were at least some changes.
2758 return max(i * graphwidth // maxtotal, int(bool(i)))
2758 return max(i * graphwidth // maxtotal, int(bool(i)))
2759
2759
2760 for filename, adds, removes, isbinary in stats:
2760 for filename, adds, removes, isbinary in stats:
2761 if isbinary:
2761 if isbinary:
2762 count = 'Bin'
2762 count = 'Bin'
2763 else:
2763 else:
2764 count = '%d' % (adds + removes)
2764 count = '%d' % (adds + removes)
2765 pluses = '+' * scale(adds)
2765 pluses = '+' * scale(adds)
2766 minuses = '-' * scale(removes)
2766 minuses = '-' * scale(removes)
2767 output.append(' %s%s | %*s %s%s\n' %
2767 output.append(' %s%s | %*s %s%s\n' %
2768 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2768 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2769 countwidth, count, pluses, minuses))
2769 countwidth, count, pluses, minuses))
2770
2770
2771 if stats:
2771 if stats:
2772 output.append(_(' %d files changed, %d insertions(+), '
2772 output.append(_(' %d files changed, %d insertions(+), '
2773 '%d deletions(-)\n')
2773 '%d deletions(-)\n')
2774 % (len(stats), totaladds, totalremoves))
2774 % (len(stats), totaladds, totalremoves))
2775
2775
2776 return ''.join(output)
2776 return ''.join(output)
2777
2777
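# Illustrative sketch (not part of patch.py): feeding diffstat() a tiny
# git-style diff (py2-style str lines, which the bytes patterns above accept)
# produces one histogram row plus a summary, roughly:
#    foo.py | 1 +
#   1 files changed, 1 insertions(+), 0 deletions(-)
def _diffstatexample():
    lines = ['diff --git a/foo.py b/foo.py',
             '--- a/foo.py',
             '+++ b/foo.py',
             '@@ -1,1 +1,2 @@',
             ' existing line',
             '+added line']
    return diffstat(lines)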
2778 def diffstatui(*args, **kw):
2778 def diffstatui(*args, **kw):
2779 '''like diffstat(), but yields 2-tuples of (output, label) for
2779 '''like diffstat(), but yields 2-tuples of (output, label) for
2780 ui.write()
2780 ui.write()
2781 '''
2781 '''
2782
2782
2783 for line in diffstat(*args, **kw).splitlines():
2783 for line in diffstat(*args, **kw).splitlines():
2784 if line and line[-1] in '+-':
2784 if line and line[-1] in '+-':
2785 name, graph = line.rsplit(' ', 1)
2785 name, graph = line.rsplit(' ', 1)
2786 yield (name + ' ', '')
2786 yield (name + ' ', '')
2787 m = re.search(br'\++', graph)
2787 m = re.search(br'\++', graph)
2788 if m:
2788 if m:
2789 yield (m.group(0), 'diffstat.inserted')
2789 yield (m.group(0), 'diffstat.inserted')
2790 m = re.search(br'-+', graph)
2790 m = re.search(br'-+', graph)
2791 if m:
2791 if m:
2792 yield (m.group(0), 'diffstat.deleted')
2792 yield (m.group(0), 'diffstat.deleted')
2793 else:
2793 else:
2794 yield (line, '')
2794 yield (line, '')
2795 yield ('\n', '')
2795 yield ('\n', '')
@@ -1,81 +1,81 b''
1 # this is a hack to make sure no escape characters are inserted into the output
1 # this is a hack to make sure no escape characters are inserted into the output
2
2
3 from __future__ import absolute_import
3 from __future__ import absolute_import
4
4
5 import doctest
5 import doctest
6 import os
6 import os
7 import re
7 import re
8 import sys
8 import sys
9
9
10 ispy3 = (sys.version_info[0] >= 3)
10 ispy3 = (sys.version_info[0] >= 3)
11
11
12 if 'TERM' in os.environ:
12 if 'TERM' in os.environ:
13 del os.environ['TERM']
13 del os.environ['TERM']
14
14
15 class py3docchecker(doctest.OutputChecker):
15 class py3docchecker(doctest.OutputChecker):
16 def check_output(self, want, got, optionflags):
16 def check_output(self, want, got, optionflags):
17 want2 = re.sub(r'''\bu(['"])(.*?)\1''', r'\1\2\1', want) # py2: u''
17 want2 = re.sub(r'''\bu(['"])(.*?)\1''', r'\1\2\1', want) # py2: u''
18 got2 = re.sub(r'''\bb(['"])(.*?)\1''', r'\1\2\1', got) # py3: b''
18 got2 = re.sub(r'''\bb(['"])(.*?)\1''', r'\1\2\1', got) # py3: b''
19 # py3: <exc.name>: b'<msg>' -> <name>: <msg>
19 # py3: <exc.name>: b'<msg>' -> <name>: <msg>
20 # <exc.name>: <others> -> <name>: <others>
20 # <exc.name>: <others> -> <name>: <others>
21 got2 = re.sub(r'''^mercurial\.\w+\.(\w+): (['"])(.*?)\2''', r'\1: \3',
21 got2 = re.sub(r'''^mercurial\.\w+\.(\w+): (['"])(.*?)\2''', r'\1: \3',
22 got2, re.MULTILINE)
22 got2, re.MULTILINE)
23 got2 = re.sub(r'^mercurial\.\w+\.(\w+): ', r'\1: ', got2, re.MULTILINE)
23 got2 = re.sub(r'^mercurial\.\w+\.(\w+): ', r'\1: ', got2, re.MULTILINE)
24 return any(doctest.OutputChecker.check_output(self, w, g, optionflags)
24 return any(doctest.OutputChecker.check_output(self, w, g, optionflags)
25 for w, g in [(want, got), (want2, got2)])
25 for w, g in [(want, got), (want2, got2)])
26
26
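# Hypothetical demo (not part of the test harness): after the substitutions
# above, a py2-style expectation and py3-style output compare equal, e.g.
# check_output("u'x'\n", "b'x'\n", 0) is True because both sides reduce to
# "'x'\n" before the second comparison.
def _checkerexample():
    return py3docchecker().check_output("u'x'\n", "b'x'\n", 0)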
27 # TODO: migrate doctests to py3 and enable them on both versions
27 # TODO: migrate doctests to py3 and enable them on both versions
28 def testmod(name, optionflags=0, testtarget=None, py2=True, py3=True):
28 def testmod(name, optionflags=0, testtarget=None, py2=True, py3=True):
29 if not (not ispy3 and py2 or ispy3 and py3):
29 if not (not ispy3 and py2 or ispy3 and py3):
30 return
30 return
31 __import__(name)
31 __import__(name)
32 mod = sys.modules[name]
32 mod = sys.modules[name]
33 if testtarget is not None:
33 if testtarget is not None:
34 mod = getattr(mod, testtarget)
34 mod = getattr(mod, testtarget)
35
35
36 # minimal copy of doctest.testmod()
36 # minimal copy of doctest.testmod()
37 finder = doctest.DocTestFinder()
37 finder = doctest.DocTestFinder()
38 checker = None
38 checker = None
39 if ispy3:
39 if ispy3:
40 checker = py3docchecker()
40 checker = py3docchecker()
41 runner = doctest.DocTestRunner(checker=checker, optionflags=optionflags)
41 runner = doctest.DocTestRunner(checker=checker, optionflags=optionflags)
42 for test in finder.find(mod, name):
42 for test in finder.find(mod, name):
43 runner.run(test)
43 runner.run(test)
44 runner.summarize()
44 runner.summarize()
45
45
46 testmod('mercurial.changegroup')
46 testmod('mercurial.changegroup')
47 testmod('mercurial.changelog')
47 testmod('mercurial.changelog')
48 testmod('mercurial.color')
48 testmod('mercurial.color')
49 testmod('mercurial.config')
49 testmod('mercurial.config')
50 testmod('mercurial.context')
50 testmod('mercurial.context')
51 testmod('mercurial.dagparser', optionflags=doctest.NORMALIZE_WHITESPACE)
51 testmod('mercurial.dagparser', optionflags=doctest.NORMALIZE_WHITESPACE)
52 testmod('mercurial.dispatch')
52 testmod('mercurial.dispatch')
53 testmod('mercurial.encoding')
53 testmod('mercurial.encoding')
54 testmod('mercurial.formatter', py3=False) # py3: write bytes to stdout
54 testmod('mercurial.formatter', py3=False) # py3: write bytes to stdout
55 testmod('mercurial.hg')
55 testmod('mercurial.hg')
56 testmod('mercurial.hgweb.hgwebdir_mod', py3=False) # py3: repr(bytes) ?
56 testmod('mercurial.hgweb.hgwebdir_mod', py3=False) # py3: repr(bytes) ?
57 testmod('mercurial.match')
57 testmod('mercurial.match')
58 testmod('mercurial.mdiff')
58 testmod('mercurial.mdiff')
59 testmod('mercurial.minirst')
59 testmod('mercurial.minirst')
60 testmod('mercurial.patch', py3=False) # py3: bytes[n], etc. ?
60 testmod('mercurial.patch')
61 testmod('mercurial.pathutil', py3=False) # py3: os.sep
61 testmod('mercurial.pathutil', py3=False) # py3: os.sep
62 testmod('mercurial.parser')
62 testmod('mercurial.parser')
63 testmod('mercurial.pycompat')
63 testmod('mercurial.pycompat')
64 testmod('mercurial.revsetlang')
64 testmod('mercurial.revsetlang')
65 testmod('mercurial.smartset')
65 testmod('mercurial.smartset')
66 testmod('mercurial.store')
66 testmod('mercurial.store')
67 testmod('mercurial.subrepo')
67 testmod('mercurial.subrepo')
68 testmod('mercurial.templatefilters')
68 testmod('mercurial.templatefilters')
69 testmod('mercurial.templater')
69 testmod('mercurial.templater')
70 testmod('mercurial.ui')
70 testmod('mercurial.ui')
71 testmod('mercurial.url')
71 testmod('mercurial.url')
72 testmod('mercurial.util', py3=False) # py3: multiple bytes/unicode issues
72 testmod('mercurial.util', py3=False) # py3: multiple bytes/unicode issues
73 testmod('mercurial.util', testtarget='platform')
73 testmod('mercurial.util', testtarget='platform')
74 testmod('hgext.convert.convcmd', py3=False) # py3: use of str() ?
74 testmod('hgext.convert.convcmd', py3=False) # py3: use of str() ?
75 testmod('hgext.convert.cvsps')
75 testmod('hgext.convert.cvsps')
76 testmod('hgext.convert.filemap')
76 testmod('hgext.convert.filemap')
77 testmod('hgext.convert.p4')
77 testmod('hgext.convert.p4')
78 testmod('hgext.convert.subversion')
78 testmod('hgext.convert.subversion')
79 testmod('hgext.mq')
79 testmod('hgext.mq')
80 # Helper scripts in tests/ that have doctests:
80 # Helper scripts in tests/ that have doctests:
81 testmod('drawdag')
81 testmod('drawdag')