##// END OF EJS Templates
extract: add some facility for extensible header parsing...
Pierre-Yves David -
r26557:23f3f1cb default
parent child Browse files
Show More
@@ -1,2551 +1,2560 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import collections
9 import collections
10 import cStringIO, email, os, errno, re, posixpath, copy
10 import cStringIO, email, os, errno, re, posixpath, copy
11 import tempfile, zlib, shutil
11 import tempfile, zlib, shutil
12
12
13 from i18n import _
13 from i18n import _
14 from node import hex, short
14 from node import hex, short
15 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
15 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
16 import pathutil
16 import pathutil
17
17
# Precompiled patterns shared across this module:
# - gitre matches a git-style diff header and captures the a/ and b/ paths
# - tabsplitter partitions a line into alternating runs of tabs and non-tabs
gitre = re.compile('diff --git a/(.*) b/(.*)')
tabsplitter = re.compile(r'(\t+|[^\t]+)')
20
20
class PatchError(Exception):
    '''Raised when a patch cannot be parsed or applied.'''
23
23
24
24
25 # public functions
25 # public functions
26
26
def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        # Decide whether a line looks like an RFC-2822 style header line.
        if inheader and line[0] in (' ', '\t'):
            # continuation of the previous header
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        kv = line.split(': ', 1)
        return len(kv) == 2 and ' ' not in kv[0]

    def chunk(lines):
        # wrap accumulated lines back up as a file-like object
        return cStringIO.StringIO(''.join(lines))

    def hgsplit(stream, cur):
        # split on "# HG changeset patch" markers (hg export output)
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        # split an mbox on "From " separators, recursing on each message
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        # hand the whole input to the email parser and walk MIME parts
        def msgfp(m):
            fp = cStringIO.StringIO()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        # split on the start of each new header block
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        # no structure detected: the whole input is one patch
        yield chunk(cur)

    class fiter(object):
        # adapt a readline-only object to the (py2) iterator protocol
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            line = self.fp.readline()
            if not line:
                raise StopIteration
            return line

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    # sniff the start of the stream to pick a splitting strategy
    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
153
153
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
patchheadermap = []

def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return a dictionary. Standard keys are:
    - filename,
    - message,
    - user,
    - date,
    - branch,
    - node,
    - p1,
    - p2.
    Any item can be missing from the dictionary. If filename is missing,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    data = {}
    # the patch body (diff part) is spooled to a temp file for the caller
    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, 'w')
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject']
        data['user'] = msg['From']
        if not subject and not data['user']:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                # strip a leading "[PATCH n/m]" style tag
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(r'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if data['user']:
            ui.debug('From: %s\n' % data['user'])
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = cStringIO.StringIO()
                # everything before the diff is candidate commit message;
                # "# ..." lines from hg export carry structured metadata
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            data['user'] = line[7:]
                            ui.debug('From: %s\n' % data['user'])
                        elif line.startswith("# Date "):
                            data['date'] = line[7:]
                        elif line.startswith("# Branch "):
                            data['branch'] = line[9:]
                        elif line.startswith("# Node ID "):
                            data['nodeid'] = line[10:]
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif line.startswith("# "):
                            # extensible headers registered by extensions
                            for header, key in patchheadermap:
                                prefix = '# %s ' % header
                                if line.startswith(prefix):
                                    data[key] = line[len(prefix):]
                        else:
                            # first non-"#" line ends the header block
                            hgpatchheader = False
                    elif line == '---':
                        # patchbomb-style separator: ignore trailing text
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    data['message'] = message
    tmpfp.close()
    if parents:
        data['p1'] = parents.pop(0)
        if parents:
            data['p2'] = parents.pop(0)

    if diffs_seen:
        data['filename'] = tmpname
    else:
        # no diff found: nothing for the caller to apply
        os.unlink(tmpname)
    return data
274
283
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # decode a unix mode word into (islink, isexec) flag bits
        islink = mode & 0o20000
        isexec = mode & 0o100
        self.mode = (islink, isexec)

    def copy(self):
        # shallow duplicate carrying all metadata fields
        dup = patchmeta(self.path)
        dup.oldpath = self.oldpath
        dup.mode = self.mode
        dup.op = self.op
        dup.binary = self.binary
        return dup

    def _ispatchinga(self, afile):
        # does 'afile' (the "---" side) refer to this entry?
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        # does 'bfile' (the "+++" side) refer to this entry?
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
320
329
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            # a new file entry begins; flush the previous one if any
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                # diff body starts: metadata for this entry is complete
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches
364
373
class linereader(object):
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        self.buf = []       # pushed-back lines, served before fp

    def push(self, line):
        # queue 'line' to be returned by the next readline(); None is a no-op
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            return self.buf.pop(0)
        return self.fp.readline()

    def __iter__(self):
        while True:
            line = self.readline()
            if not line:
                break
            yield line
388
397
class abstractbackend(object):
    '''Interface for objects a patch is applied against.

    Concrete subclasses decide where file contents come from and go to
    (working directory, in-memory store, ...).
    '''
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for this
        file.
        """
        pass

    def exists(self, fname):
        raise NotImplementedError
420
429
class fsbackend(abstractbackend):
    '''Patch backend reading and writing files under a base directory.'''
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = scmutil.opener(basedir)

    def _join(self, f):
        # absolute path of 'f' under the backend's base directory
        return os.path.join(self.opener.base, f)

    def getfile(self, fname):
        if self.opener.islink(fname):
            # symlinks: content is the link target
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as e:
            # a missing file simply means no exec bit; anything else is real
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # content unchanged: only adjust the flags
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
            if isexec:
                self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
472
481
class workingbackend(fsbackend):
    '''fsbackend applying patches to a repository working directory and
    keeping the dirstate in sync (copies, removals, similarity detection).'''
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # refuse to touch files the dirstate does not know about
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        # propagate all recorded changes into the dirstate
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)
516
525
class filestore(object):
    '''In-memory file content store that spills to a temporary directory
    once more than 'maxsize' bytes (default 4MB) are held in memory.'''
    def __init__(self, maxsize=None):
        self.opener = None      # lazily-created opener for spilled files
        self.files = {}         # fname -> (tempname, mode, copied)
        self.created = 0        # counter used to name spilled files
        self.maxsize = maxsize
        if self.maxsize is None:
            self.maxsize = 4*(2**20)
        self.size = 0
        self.data = {}          # fname -> (data, mode, copied), in-memory

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            # still within budget: keep in memory
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = tempfile.mkdtemp(prefix='hg-patch-')
                self.opener = scmutil.opener(root)
            # Avoid filename issues with these simple names
            fn = str(self.created)
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        if self.opener:
            shutil.rmtree(self.opener.base)
553
562
class repobackend(abstractbackend):
    '''Backend reading originals from a changectx and writing results into
    a filestore, for in-memory patching of repository revisions.'''
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            # not present in this revision
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # flags-only change: carry over the existing content
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        return self.changed | self.removed
595
604
596 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
605 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
597 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
606 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
598 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
607 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
599 eolmodes = ['strict', 'crlf', 'lf', 'auto']
608 eolmodes = ['strict', 'crlf', 'lf', 'auto']
600
609
601 class patchfile(object):
610 class patchfile(object):
602 def __init__(self, ui, gp, backend, store, eolmode='strict'):
611 def __init__(self, ui, gp, backend, store, eolmode='strict'):
603 self.fname = gp.path
612 self.fname = gp.path
604 self.eolmode = eolmode
613 self.eolmode = eolmode
605 self.eol = None
614 self.eol = None
606 self.backend = backend
615 self.backend = backend
607 self.ui = ui
616 self.ui = ui
608 self.lines = []
617 self.lines = []
609 self.exists = False
618 self.exists = False
610 self.missing = True
619 self.missing = True
611 self.mode = gp.mode
620 self.mode = gp.mode
612 self.copysource = gp.oldpath
621 self.copysource = gp.oldpath
613 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
622 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
614 self.remove = gp.op == 'DELETE'
623 self.remove = gp.op == 'DELETE'
615 if self.copysource is None:
624 if self.copysource is None:
616 data, mode = backend.getfile(self.fname)
625 data, mode = backend.getfile(self.fname)
617 else:
626 else:
618 data, mode = store.getfile(self.copysource)[:2]
627 data, mode = store.getfile(self.copysource)[:2]
619 if data is not None:
628 if data is not None:
620 self.exists = self.copysource is None or backend.exists(self.fname)
629 self.exists = self.copysource is None or backend.exists(self.fname)
621 self.missing = False
630 self.missing = False
622 if data:
631 if data:
623 self.lines = mdiff.splitnewlines(data)
632 self.lines = mdiff.splitnewlines(data)
624 if self.mode is None:
633 if self.mode is None:
625 self.mode = mode
634 self.mode = mode
626 if self.lines:
635 if self.lines:
627 # Normalize line endings
636 # Normalize line endings
628 if self.lines[0].endswith('\r\n'):
637 if self.lines[0].endswith('\r\n'):
629 self.eol = '\r\n'
638 self.eol = '\r\n'
630 elif self.lines[0].endswith('\n'):
639 elif self.lines[0].endswith('\n'):
631 self.eol = '\n'
640 self.eol = '\n'
632 if eolmode != 'strict':
641 if eolmode != 'strict':
633 nlines = []
642 nlines = []
634 for l in self.lines:
643 for l in self.lines:
635 if l.endswith('\r\n'):
644 if l.endswith('\r\n'):
636 l = l[:-2] + '\n'
645 l = l[:-2] + '\n'
637 nlines.append(l)
646 nlines.append(l)
638 self.lines = nlines
647 self.lines = nlines
639 else:
648 else:
640 if self.create:
649 if self.create:
641 self.missing = False
650 self.missing = False
642 if self.mode is None:
651 if self.mode is None:
643 self.mode = (False, False)
652 self.mode = (False, False)
644 if self.missing:
653 if self.missing:
645 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
654 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
646
655
647 self.hash = {}
656 self.hash = {}
648 self.dirty = 0
657 self.dirty = 0
649 self.offset = 0
658 self.offset = 0
650 self.skew = 0
659 self.skew = 0
651 self.rej = []
660 self.rej = []
652 self.fileprinted = False
661 self.fileprinted = False
653 self.printfile(False)
662 self.printfile(False)
654 self.hunks = 0
663 self.hunks = 0
655
664
656 def writelines(self, fname, lines, mode):
665 def writelines(self, fname, lines, mode):
657 if self.eolmode == 'auto':
666 if self.eolmode == 'auto':
658 eol = self.eol
667 eol = self.eol
659 elif self.eolmode == 'crlf':
668 elif self.eolmode == 'crlf':
660 eol = '\r\n'
669 eol = '\r\n'
661 else:
670 else:
662 eol = '\n'
671 eol = '\n'
663
672
664 if self.eolmode != 'strict' and eol and eol != '\n':
673 if self.eolmode != 'strict' and eol and eol != '\n':
665 rawlines = []
674 rawlines = []
666 for l in lines:
675 for l in lines:
667 if l and l[-1] == '\n':
676 if l and l[-1] == '\n':
668 l = l[:-1] + eol
677 l = l[:-1] + eol
669 rawlines.append(l)
678 rawlines.append(l)
670 lines = rawlines
679 lines = rawlines
671
680
672 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
681 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
673
682
674 def printfile(self, warn):
683 def printfile(self, warn):
675 if self.fileprinted:
684 if self.fileprinted:
676 return
685 return
677 if warn or self.ui.verbose:
686 if warn or self.ui.verbose:
678 self.fileprinted = True
687 self.fileprinted = True
679 s = _("patching file %s\n") % self.fname
688 s = _("patching file %s\n") % self.fname
680 if warn:
689 if warn:
681 self.ui.warn(s)
690 self.ui.warn(s)
682 else:
691 else:
683 self.ui.note(s)
692 self.ui.note(s)
684
693
685
694
686 def findlines(self, l, linenum):
695 def findlines(self, l, linenum):
687 # looks through the hash and finds candidate lines. The
696 # looks through the hash and finds candidate lines. The
688 # result is a list of line numbers sorted based on distance
697 # result is a list of line numbers sorted based on distance
689 # from linenum
698 # from linenum
690
699
691 cand = self.hash.get(l, [])
700 cand = self.hash.get(l, [])
692 if len(cand) > 1:
701 if len(cand) > 1:
693 # resort our list of potentials forward then back.
702 # resort our list of potentials forward then back.
694 cand.sort(key=lambda x: abs(x - linenum))
703 cand.sort(key=lambda x: abs(x - linenum))
695 return cand
704 return cand
696
705
697 def write_rej(self):
706 def write_rej(self):
698 # our rejects are a little different from patch(1). This always
707 # our rejects are a little different from patch(1). This always
699 # creates rejects in the same form as the original patch. A file
708 # creates rejects in the same form as the original patch. A file
700 # header is inserted so that you can run the reject through patch again
709 # header is inserted so that you can run the reject through patch again
701 # without having to type the filename.
710 # without having to type the filename.
702 if not self.rej:
711 if not self.rej:
703 return
712 return
704 base = os.path.basename(self.fname)
713 base = os.path.basename(self.fname)
705 lines = ["--- %s\n+++ %s\n" % (base, base)]
714 lines = ["--- %s\n+++ %s\n" % (base, base)]
706 for x in self.rej:
715 for x in self.rej:
707 for l in x.hunk:
716 for l in x.hunk:
708 lines.append(l)
717 lines.append(l)
709 if l[-1] != '\n':
718 if l[-1] != '\n':
710 lines.append("\n\ No newline at end of file\n")
719 lines.append("\n\ No newline at end of file\n")
711 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
720 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
712
721
713 def apply(self, h):
722 def apply(self, h):
714 if not h.complete():
723 if not h.complete():
715 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
724 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
716 (h.number, h.desc, len(h.a), h.lena, len(h.b),
725 (h.number, h.desc, len(h.a), h.lena, len(h.b),
717 h.lenb))
726 h.lenb))
718
727
719 self.hunks += 1
728 self.hunks += 1
720
729
721 if self.missing:
730 if self.missing:
722 self.rej.append(h)
731 self.rej.append(h)
723 return -1
732 return -1
724
733
725 if self.exists and self.create:
734 if self.exists and self.create:
726 if self.copysource:
735 if self.copysource:
727 self.ui.warn(_("cannot create %s: destination already "
736 self.ui.warn(_("cannot create %s: destination already "
728 "exists\n") % self.fname)
737 "exists\n") % self.fname)
729 else:
738 else:
730 self.ui.warn(_("file %s already exists\n") % self.fname)
739 self.ui.warn(_("file %s already exists\n") % self.fname)
731 self.rej.append(h)
740 self.rej.append(h)
732 return -1
741 return -1
733
742
734 if isinstance(h, binhunk):
743 if isinstance(h, binhunk):
735 if self.remove:
744 if self.remove:
736 self.backend.unlink(self.fname)
745 self.backend.unlink(self.fname)
737 else:
746 else:
738 l = h.new(self.lines)
747 l = h.new(self.lines)
739 self.lines[:] = l
748 self.lines[:] = l
740 self.offset += len(l)
749 self.offset += len(l)
741 self.dirty = True
750 self.dirty = True
742 return 0
751 return 0
743
752
744 horig = h
753 horig = h
745 if (self.eolmode in ('crlf', 'lf')
754 if (self.eolmode in ('crlf', 'lf')
746 or self.eolmode == 'auto' and self.eol):
755 or self.eolmode == 'auto' and self.eol):
747 # If new eols are going to be normalized, then normalize
756 # If new eols are going to be normalized, then normalize
748 # hunk data before patching. Otherwise, preserve input
757 # hunk data before patching. Otherwise, preserve input
749 # line-endings.
758 # line-endings.
750 h = h.getnormalized()
759 h = h.getnormalized()
751
760
752 # fast case first, no offsets, no fuzz
761 # fast case first, no offsets, no fuzz
753 old, oldstart, new, newstart = h.fuzzit(0, False)
762 old, oldstart, new, newstart = h.fuzzit(0, False)
754 oldstart += self.offset
763 oldstart += self.offset
755 orig_start = oldstart
764 orig_start = oldstart
756 # if there's skew we want to emit the "(offset %d lines)" even
765 # if there's skew we want to emit the "(offset %d lines)" even
757 # when the hunk cleanly applies at start + skew, so skip the
766 # when the hunk cleanly applies at start + skew, so skip the
758 # fast case code
767 # fast case code
759 if (self.skew == 0 and
768 if (self.skew == 0 and
760 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
769 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
761 if self.remove:
770 if self.remove:
762 self.backend.unlink(self.fname)
771 self.backend.unlink(self.fname)
763 else:
772 else:
764 self.lines[oldstart:oldstart + len(old)] = new
773 self.lines[oldstart:oldstart + len(old)] = new
765 self.offset += len(new) - len(old)
774 self.offset += len(new) - len(old)
766 self.dirty = True
775 self.dirty = True
767 return 0
776 return 0
768
777
769 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
778 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
770 self.hash = {}
779 self.hash = {}
771 for x, s in enumerate(self.lines):
780 for x, s in enumerate(self.lines):
772 self.hash.setdefault(s, []).append(x)
781 self.hash.setdefault(s, []).append(x)
773
782
774 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
783 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
775 for toponly in [True, False]:
784 for toponly in [True, False]:
776 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
785 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
777 oldstart = oldstart + self.offset + self.skew
786 oldstart = oldstart + self.offset + self.skew
778 oldstart = min(oldstart, len(self.lines))
787 oldstart = min(oldstart, len(self.lines))
779 if old:
788 if old:
780 cand = self.findlines(old[0][1:], oldstart)
789 cand = self.findlines(old[0][1:], oldstart)
781 else:
790 else:
782 # Only adding lines with no or fuzzed context, just
791 # Only adding lines with no or fuzzed context, just
783 # take the skew in account
792 # take the skew in account
784 cand = [oldstart]
793 cand = [oldstart]
785
794
786 for l in cand:
795 for l in cand:
787 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
796 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
788 self.lines[l : l + len(old)] = new
797 self.lines[l : l + len(old)] = new
789 self.offset += len(new) - len(old)
798 self.offset += len(new) - len(old)
790 self.skew = l - orig_start
799 self.skew = l - orig_start
791 self.dirty = True
800 self.dirty = True
792 offset = l - orig_start - fuzzlen
801 offset = l - orig_start - fuzzlen
793 if fuzzlen:
802 if fuzzlen:
794 msg = _("Hunk #%d succeeded at %d "
803 msg = _("Hunk #%d succeeded at %d "
795 "with fuzz %d "
804 "with fuzz %d "
796 "(offset %d lines).\n")
805 "(offset %d lines).\n")
797 self.printfile(True)
806 self.printfile(True)
798 self.ui.warn(msg %
807 self.ui.warn(msg %
799 (h.number, l + 1, fuzzlen, offset))
808 (h.number, l + 1, fuzzlen, offset))
800 else:
809 else:
801 msg = _("Hunk #%d succeeded at %d "
810 msg = _("Hunk #%d succeeded at %d "
802 "(offset %d lines).\n")
811 "(offset %d lines).\n")
803 self.ui.note(msg % (h.number, l + 1, offset))
812 self.ui.note(msg % (h.number, l + 1, offset))
804 return fuzzlen
813 return fuzzlen
805 self.printfile(True)
814 self.printfile(True)
806 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
815 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
807 self.rej.append(horig)
816 self.rej.append(horig)
808 return -1
817 return -1
809
818
810 def close(self):
819 def close(self):
811 if self.dirty:
820 if self.dirty:
812 self.writelines(self.fname, self.lines, self.mode)
821 self.writelines(self.fname, self.lines, self.mode)
813 self.write_rej()
822 self.write_rej()
814 return len(self.rej)
823 return len(self.rej)
815
824
816 class header(object):
825 class header(object):
817 """patch header
826 """patch header
818 """
827 """
819 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
828 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
820 diff_re = re.compile('diff -r .* (.*)$')
829 diff_re = re.compile('diff -r .* (.*)$')
821 allhunks_re = re.compile('(?:index|deleted file) ')
830 allhunks_re = re.compile('(?:index|deleted file) ')
822 pretty_re = re.compile('(?:new file|deleted file) ')
831 pretty_re = re.compile('(?:new file|deleted file) ')
823 special_re = re.compile('(?:index|deleted|copy|rename) ')
832 special_re = re.compile('(?:index|deleted|copy|rename) ')
824 newfile_re = re.compile('(?:new file)')
833 newfile_re = re.compile('(?:new file)')
825
834
826 def __init__(self, header):
835 def __init__(self, header):
827 self.header = header
836 self.header = header
828 self.hunks = []
837 self.hunks = []
829
838
830 def binary(self):
839 def binary(self):
831 return any(h.startswith('index ') for h in self.header)
840 return any(h.startswith('index ') for h in self.header)
832
841
833 def pretty(self, fp):
842 def pretty(self, fp):
834 for h in self.header:
843 for h in self.header:
835 if h.startswith('index '):
844 if h.startswith('index '):
836 fp.write(_('this modifies a binary file (all or nothing)\n'))
845 fp.write(_('this modifies a binary file (all or nothing)\n'))
837 break
846 break
838 if self.pretty_re.match(h):
847 if self.pretty_re.match(h):
839 fp.write(h)
848 fp.write(h)
840 if self.binary():
849 if self.binary():
841 fp.write(_('this is a binary file\n'))
850 fp.write(_('this is a binary file\n'))
842 break
851 break
843 if h.startswith('---'):
852 if h.startswith('---'):
844 fp.write(_('%d hunks, %d lines changed\n') %
853 fp.write(_('%d hunks, %d lines changed\n') %
845 (len(self.hunks),
854 (len(self.hunks),
846 sum([max(h.added, h.removed) for h in self.hunks])))
855 sum([max(h.added, h.removed) for h in self.hunks])))
847 break
856 break
848 fp.write(h)
857 fp.write(h)
849
858
850 def write(self, fp):
859 def write(self, fp):
851 fp.write(''.join(self.header))
860 fp.write(''.join(self.header))
852
861
853 def allhunks(self):
862 def allhunks(self):
854 return any(self.allhunks_re.match(h) for h in self.header)
863 return any(self.allhunks_re.match(h) for h in self.header)
855
864
856 def files(self):
865 def files(self):
857 match = self.diffgit_re.match(self.header[0])
866 match = self.diffgit_re.match(self.header[0])
858 if match:
867 if match:
859 fromfile, tofile = match.groups()
868 fromfile, tofile = match.groups()
860 if fromfile == tofile:
869 if fromfile == tofile:
861 return [fromfile]
870 return [fromfile]
862 return [fromfile, tofile]
871 return [fromfile, tofile]
863 else:
872 else:
864 return self.diff_re.match(self.header[0]).groups()
873 return self.diff_re.match(self.header[0]).groups()
865
874
866 def filename(self):
875 def filename(self):
867 return self.files()[-1]
876 return self.files()[-1]
868
877
869 def __repr__(self):
878 def __repr__(self):
870 return '<header %s>' % (' '.join(map(repr, self.files())))
879 return '<header %s>' % (' '.join(map(repr, self.files())))
871
880
872 def isnewfile(self):
881 def isnewfile(self):
873 return any(self.newfile_re.match(h) for h in self.header)
882 return any(self.newfile_re.match(h) for h in self.header)
874
883
875 def special(self):
884 def special(self):
876 # Special files are shown only at the header level and not at the hunk
885 # Special files are shown only at the header level and not at the hunk
877 # level for example a file that has been deleted is a special file.
886 # level for example a file that has been deleted is a special file.
878 # The user cannot change the content of the operation, in the case of
887 # The user cannot change the content of the operation, in the case of
879 # the deleted file he has to take the deletion or not take it, he
888 # the deleted file he has to take the deletion or not take it, he
880 # cannot take some of it.
889 # cannot take some of it.
881 # Newly added files are special if they are empty, they are not special
890 # Newly added files are special if they are empty, they are not special
882 # if they have some content as we want to be able to change it
891 # if they have some content as we want to be able to change it
883 nocontent = len(self.header) == 2
892 nocontent = len(self.header) == 2
884 emptynewfile = self.isnewfile() and nocontent
893 emptynewfile = self.isnewfile() and nocontent
885 return emptynewfile or \
894 return emptynewfile or \
886 any(self.special_re.match(h) for h in self.header)
895 any(self.special_re.match(h) for h in self.header)
887
896
888 class recordhunk(object):
897 class recordhunk(object):
889 """patch hunk
898 """patch hunk
890
899
891 XXX shouldn't we merge this with the other hunk class?
900 XXX shouldn't we merge this with the other hunk class?
892 """
901 """
893 maxcontext = 3
902 maxcontext = 3
894
903
895 def __init__(self, header, fromline, toline, proc, before, hunk, after):
904 def __init__(self, header, fromline, toline, proc, before, hunk, after):
896 def trimcontext(number, lines):
905 def trimcontext(number, lines):
897 delta = len(lines) - self.maxcontext
906 delta = len(lines) - self.maxcontext
898 if False and delta > 0:
907 if False and delta > 0:
899 return number + delta, lines[:self.maxcontext]
908 return number + delta, lines[:self.maxcontext]
900 return number, lines
909 return number, lines
901
910
902 self.header = header
911 self.header = header
903 self.fromline, self.before = trimcontext(fromline, before)
912 self.fromline, self.before = trimcontext(fromline, before)
904 self.toline, self.after = trimcontext(toline, after)
913 self.toline, self.after = trimcontext(toline, after)
905 self.proc = proc
914 self.proc = proc
906 self.hunk = hunk
915 self.hunk = hunk
907 self.added, self.removed = self.countchanges(self.hunk)
916 self.added, self.removed = self.countchanges(self.hunk)
908
917
909 def __eq__(self, v):
918 def __eq__(self, v):
910 if not isinstance(v, recordhunk):
919 if not isinstance(v, recordhunk):
911 return False
920 return False
912
921
913 return ((v.hunk == self.hunk) and
922 return ((v.hunk == self.hunk) and
914 (v.proc == self.proc) and
923 (v.proc == self.proc) and
915 (self.fromline == v.fromline) and
924 (self.fromline == v.fromline) and
916 (self.header.files() == v.header.files()))
925 (self.header.files() == v.header.files()))
917
926
918 def __hash__(self):
927 def __hash__(self):
919 return hash((tuple(self.hunk),
928 return hash((tuple(self.hunk),
920 tuple(self.header.files()),
929 tuple(self.header.files()),
921 self.fromline,
930 self.fromline,
922 self.proc))
931 self.proc))
923
932
924 def countchanges(self, hunk):
933 def countchanges(self, hunk):
925 """hunk -> (n+,n-)"""
934 """hunk -> (n+,n-)"""
926 add = len([h for h in hunk if h[0] == '+'])
935 add = len([h for h in hunk if h[0] == '+'])
927 rem = len([h for h in hunk if h[0] == '-'])
936 rem = len([h for h in hunk if h[0] == '-'])
928 return add, rem
937 return add, rem
929
938
930 def write(self, fp):
939 def write(self, fp):
931 delta = len(self.before) + len(self.after)
940 delta = len(self.before) + len(self.after)
932 if self.after and self.after[-1] == '\\ No newline at end of file\n':
941 if self.after and self.after[-1] == '\\ No newline at end of file\n':
933 delta -= 1
942 delta -= 1
934 fromlen = delta + self.removed
943 fromlen = delta + self.removed
935 tolen = delta + self.added
944 tolen = delta + self.added
936 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
945 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
937 (self.fromline, fromlen, self.toline, tolen,
946 (self.fromline, fromlen, self.toline, tolen,
938 self.proc and (' ' + self.proc)))
947 self.proc and (' ' + self.proc)))
939 fp.write(''.join(self.before + self.hunk + self.after))
948 fp.write(''.join(self.before + self.hunk + self.after))
940
949
941 pretty = write
950 pretty = write
942
951
943 def filename(self):
952 def filename(self):
944 return self.header.filename()
953 return self.header.filename()
945
954
946 def __repr__(self):
955 def __repr__(self):
947 return '<hunk %r@%d>' % (self.filename(), self.fromline)
956 return '<hunk %r@%d>' % (self.filename(), self.fromline)
948
957
949 def filterpatch(ui, headers, operation=None):
958 def filterpatch(ui, headers, operation=None):
950 """Interactively filter patch chunks into applied-only chunks"""
959 """Interactively filter patch chunks into applied-only chunks"""
951 if operation is None:
960 if operation is None:
952 operation = _('record')
961 operation = _('record')
953
962
954 def prompt(skipfile, skipall, query, chunk):
963 def prompt(skipfile, skipall, query, chunk):
955 """prompt query, and process base inputs
964 """prompt query, and process base inputs
956
965
957 - y/n for the rest of file
966 - y/n for the rest of file
958 - y/n for the rest
967 - y/n for the rest
959 - ? (help)
968 - ? (help)
960 - q (quit)
969 - q (quit)
961
970
962 Return True/False and possibly updated skipfile and skipall.
971 Return True/False and possibly updated skipfile and skipall.
963 """
972 """
964 newpatches = None
973 newpatches = None
965 if skipall is not None:
974 if skipall is not None:
966 return skipall, skipfile, skipall, newpatches
975 return skipall, skipfile, skipall, newpatches
967 if skipfile is not None:
976 if skipfile is not None:
968 return skipfile, skipfile, skipall, newpatches
977 return skipfile, skipfile, skipall, newpatches
969 while True:
978 while True:
970 resps = _('[Ynesfdaq?]'
979 resps = _('[Ynesfdaq?]'
971 '$$ &Yes, record this change'
980 '$$ &Yes, record this change'
972 '$$ &No, skip this change'
981 '$$ &No, skip this change'
973 '$$ &Edit this change manually'
982 '$$ &Edit this change manually'
974 '$$ &Skip remaining changes to this file'
983 '$$ &Skip remaining changes to this file'
975 '$$ Record remaining changes to this &file'
984 '$$ Record remaining changes to this &file'
976 '$$ &Done, skip remaining changes and files'
985 '$$ &Done, skip remaining changes and files'
977 '$$ Record &all changes to all remaining files'
986 '$$ Record &all changes to all remaining files'
978 '$$ &Quit, recording no changes'
987 '$$ &Quit, recording no changes'
979 '$$ &? (display help)')
988 '$$ &? (display help)')
980 r = ui.promptchoice("%s %s" % (query, resps))
989 r = ui.promptchoice("%s %s" % (query, resps))
981 ui.write("\n")
990 ui.write("\n")
982 if r == 8: # ?
991 if r == 8: # ?
983 for c, t in ui.extractchoices(resps)[1]:
992 for c, t in ui.extractchoices(resps)[1]:
984 ui.write('%s - %s\n' % (c, t.lower()))
993 ui.write('%s - %s\n' % (c, t.lower()))
985 continue
994 continue
986 elif r == 0: # yes
995 elif r == 0: # yes
987 ret = True
996 ret = True
988 elif r == 1: # no
997 elif r == 1: # no
989 ret = False
998 ret = False
990 elif r == 2: # Edit patch
999 elif r == 2: # Edit patch
991 if chunk is None:
1000 if chunk is None:
992 ui.write(_('cannot edit patch for whole file'))
1001 ui.write(_('cannot edit patch for whole file'))
993 ui.write("\n")
1002 ui.write("\n")
994 continue
1003 continue
995 if chunk.header.binary():
1004 if chunk.header.binary():
996 ui.write(_('cannot edit patch for binary file'))
1005 ui.write(_('cannot edit patch for binary file'))
997 ui.write("\n")
1006 ui.write("\n")
998 continue
1007 continue
999 # Patch comment based on the Git one (based on comment at end of
1008 # Patch comment based on the Git one (based on comment at end of
1000 # https://mercurial-scm.org/wiki/RecordExtension)
1009 # https://mercurial-scm.org/wiki/RecordExtension)
1001 phelp = '---' + _("""
1010 phelp = '---' + _("""
1002 To remove '-' lines, make them ' ' lines (context).
1011 To remove '-' lines, make them ' ' lines (context).
1003 To remove '+' lines, delete them.
1012 To remove '+' lines, delete them.
1004 Lines starting with # will be removed from the patch.
1013 Lines starting with # will be removed from the patch.
1005
1014
1006 If the patch applies cleanly, the edited hunk will immediately be
1015 If the patch applies cleanly, the edited hunk will immediately be
1007 added to the record list. If it does not apply cleanly, a rejects
1016 added to the record list. If it does not apply cleanly, a rejects
1008 file will be generated: you can use that when you try again. If
1017 file will be generated: you can use that when you try again. If
1009 all lines of the hunk are removed, then the edit is aborted and
1018 all lines of the hunk are removed, then the edit is aborted and
1010 the hunk is left unchanged.
1019 the hunk is left unchanged.
1011 """)
1020 """)
1012 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1021 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1013 suffix=".diff", text=True)
1022 suffix=".diff", text=True)
1014 ncpatchfp = None
1023 ncpatchfp = None
1015 try:
1024 try:
1016 # Write the initial patch
1025 # Write the initial patch
1017 f = os.fdopen(patchfd, "w")
1026 f = os.fdopen(patchfd, "w")
1018 chunk.header.write(f)
1027 chunk.header.write(f)
1019 chunk.write(f)
1028 chunk.write(f)
1020 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1029 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1021 f.close()
1030 f.close()
1022 # Start the editor and wait for it to complete
1031 # Start the editor and wait for it to complete
1023 editor = ui.geteditor()
1032 editor = ui.geteditor()
1024 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1033 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1025 environ={'HGUSER': ui.username()})
1034 environ={'HGUSER': ui.username()})
1026 if ret != 0:
1035 if ret != 0:
1027 ui.warn(_("editor exited with exit code %d\n") % ret)
1036 ui.warn(_("editor exited with exit code %d\n") % ret)
1028 continue
1037 continue
1029 # Remove comment lines
1038 # Remove comment lines
1030 patchfp = open(patchfn)
1039 patchfp = open(patchfn)
1031 ncpatchfp = cStringIO.StringIO()
1040 ncpatchfp = cStringIO.StringIO()
1032 for line in patchfp:
1041 for line in patchfp:
1033 if not line.startswith('#'):
1042 if not line.startswith('#'):
1034 ncpatchfp.write(line)
1043 ncpatchfp.write(line)
1035 patchfp.close()
1044 patchfp.close()
1036 ncpatchfp.seek(0)
1045 ncpatchfp.seek(0)
1037 newpatches = parsepatch(ncpatchfp)
1046 newpatches = parsepatch(ncpatchfp)
1038 finally:
1047 finally:
1039 os.unlink(patchfn)
1048 os.unlink(patchfn)
1040 del ncpatchfp
1049 del ncpatchfp
1041 # Signal that the chunk shouldn't be applied as-is, but
1050 # Signal that the chunk shouldn't be applied as-is, but
1042 # provide the new patch to be used instead.
1051 # provide the new patch to be used instead.
1043 ret = False
1052 ret = False
1044 elif r == 3: # Skip
1053 elif r == 3: # Skip
1045 ret = skipfile = False
1054 ret = skipfile = False
1046 elif r == 4: # file (Record remaining)
1055 elif r == 4: # file (Record remaining)
1047 ret = skipfile = True
1056 ret = skipfile = True
1048 elif r == 5: # done, skip remaining
1057 elif r == 5: # done, skip remaining
1049 ret = skipall = False
1058 ret = skipall = False
1050 elif r == 6: # all
1059 elif r == 6: # all
1051 ret = skipall = True
1060 ret = skipall = True
1052 elif r == 7: # quit
1061 elif r == 7: # quit
1053 raise util.Abort(_('user quit'))
1062 raise util.Abort(_('user quit'))
1054 return ret, skipfile, skipall, newpatches
1063 return ret, skipfile, skipall, newpatches
1055
1064
1056 seen = set()
1065 seen = set()
1057 applied = {} # 'filename' -> [] of chunks
1066 applied = {} # 'filename' -> [] of chunks
1058 skipfile, skipall = None, None
1067 skipfile, skipall = None, None
1059 pos, total = 1, sum(len(h.hunks) for h in headers)
1068 pos, total = 1, sum(len(h.hunks) for h in headers)
1060 for h in headers:
1069 for h in headers:
1061 pos += len(h.hunks)
1070 pos += len(h.hunks)
1062 skipfile = None
1071 skipfile = None
1063 fixoffset = 0
1072 fixoffset = 0
1064 hdr = ''.join(h.header)
1073 hdr = ''.join(h.header)
1065 if hdr in seen:
1074 if hdr in seen:
1066 continue
1075 continue
1067 seen.add(hdr)
1076 seen.add(hdr)
1068 if skipall is None:
1077 if skipall is None:
1069 h.pretty(ui)
1078 h.pretty(ui)
1070 msg = (_('examine changes to %s?') %
1079 msg = (_('examine changes to %s?') %
1071 _(' and ').join("'%s'" % f for f in h.files()))
1080 _(' and ').join("'%s'" % f for f in h.files()))
1072 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1081 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1073 if not r:
1082 if not r:
1074 continue
1083 continue
1075 applied[h.filename()] = [h]
1084 applied[h.filename()] = [h]
1076 if h.allhunks():
1085 if h.allhunks():
1077 applied[h.filename()] += h.hunks
1086 applied[h.filename()] += h.hunks
1078 continue
1087 continue
1079 for i, chunk in enumerate(h.hunks):
1088 for i, chunk in enumerate(h.hunks):
1080 if skipfile is None and skipall is None:
1089 if skipfile is None and skipall is None:
1081 chunk.pretty(ui)
1090 chunk.pretty(ui)
1082 if total == 1:
1091 if total == 1:
1083 msg = _("record this change to '%s'?") % chunk.filename()
1092 msg = _("record this change to '%s'?") % chunk.filename()
1084 else:
1093 else:
1085 idx = pos - len(h.hunks) + i
1094 idx = pos - len(h.hunks) + i
1086 msg = _("record change %d/%d to '%s'?") % (idx, total,
1095 msg = _("record change %d/%d to '%s'?") % (idx, total,
1087 chunk.filename())
1096 chunk.filename())
1088 r, skipfile, skipall, newpatches = prompt(skipfile,
1097 r, skipfile, skipall, newpatches = prompt(skipfile,
1089 skipall, msg, chunk)
1098 skipall, msg, chunk)
1090 if r:
1099 if r:
1091 if fixoffset:
1100 if fixoffset:
1092 chunk = copy.copy(chunk)
1101 chunk = copy.copy(chunk)
1093 chunk.toline += fixoffset
1102 chunk.toline += fixoffset
1094 applied[chunk.filename()].append(chunk)
1103 applied[chunk.filename()].append(chunk)
1095 elif newpatches is not None:
1104 elif newpatches is not None:
1096 for newpatch in newpatches:
1105 for newpatch in newpatches:
1097 for newhunk in newpatch.hunks:
1106 for newhunk in newpatch.hunks:
1098 if fixoffset:
1107 if fixoffset:
1099 newhunk.toline += fixoffset
1108 newhunk.toline += fixoffset
1100 applied[newhunk.filename()].append(newhunk)
1109 applied[newhunk.filename()].append(newhunk)
1101 else:
1110 else:
1102 fixoffset += chunk.removed - chunk.added
1111 fixoffset += chunk.removed - chunk.added
1103 return sum([h for h in applied.itervalues()
1112 return sum([h for h in applied.itervalues()
1104 if h[0].special() or len(h) > 1], [])
1113 if h[0].special() or len(h) > 1], [])
class hunk(object):
    """One hunk of a plain (non-git) patch.

    Parses either a unified ('@@ -a,b +c,d @@') or a context-format hunk
    from a linereader.  ``self.a`` holds the old side (lines keep their
    '-'/' ' marker), ``self.b`` the new side (markers stripped), and
    ``self.hunk`` the full hunk text in unified form.
    """

    def __init__(self, desc, num, lr, context):
        # desc: the hunk header line; num: 1-based hunk index, used only
        # in error messages; lr: linereader positioned just after desc,
        # or None to build an empty shell (getnormalized relies on this);
        # context: True when the input is in context-diff format.
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        # Parse '@@ -starta[,lena] +startb[,lenb] @@'; a missing length
        # defaults to 1 per the unified diff format.
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        # Context format: '*** starta,aend ****', old lines, then
        # '--- startb,bend ----', new lines.  The hunk is rebuilt into
        # unified form in self.hunk as we go.
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': strip the trailing newline
            # from the last old-side line.
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                # NOTE(review): message says "old text" although this loop
                # reads the new side -- kept verbatim in this doc-only pass.
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # Splice the new-side line into the unified hunk, skipping over
            # '-' lines already emitted for the old side.
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # Consume a trailing '\ No newline at end of file' marker, if any;
        # otherwise push the peeked line back onto the reader.
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        # True once both sides hold as many lines as the header promised.
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        # Fuzzing trims context from both sides; shift the start lines by
        # the number of lines removed from the top.
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1321
1330
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        # text: the decoded payload; stays None until _read succeeds,
        # which is what complete() checks.
        self.text = None
        # delta: True for git 'delta' hunks, which must be applied against
        # the current file contents instead of used verbatim.
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        return self.text is not None

    def new(self, lines):
        # lines: current file contents; only consulted for delta hunks.
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        def getline(lr, hunk):
            # Read one raw line, record it in the hunk, return it stripped.
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        # Scan for the 'literal <size>' or 'delta <size>' header announcing
        # the decoded payload length.
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            # The first character encodes this line's payload length in the
            # git base85 framing: 'A'-'Z' -> 1..26, 'a'-'z' -> 27..52.
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        # The payload is zlib-compressed after base85 decoding.
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1377
1386
def parsefilename(str):
    """Extract the filename from a '--- filename<tab|space>junk' line.

    The leading four characters ('--- ' or '+++ ') and the line ending are
    dropped; the name ends at the first tab, or, failing that, the first
    space.  With neither separator present the whole remainder is the name.
    """
    trimmed = str[4:].rstrip('\r\n')
    # Prefer a tab as separator (what diff emits); fall back to a space.
    for sep in ('\t', ' '):
        cut = trimmed.find(sep)
        if cut >= 0:
            return trimmed[:cut]
    return trimmed
1387
1396
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = """diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch(rawpatch)
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> fp = cStringIO.StringIO()
    >>> for c in reversedhunks:
    ...     c.write(fp)
    >>> fp.seek(0)
    >>> reversedpatch = fp.read()
    >>> print reversedpatch
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -1,6 +2,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -5,3 +6,2 @@
     5
     d
    -lastline

    '''

    # Imported here rather than at module level to avoid an import cycle.
    import crecord as crecordmod
    newhunks = []
    for c in hunks:
        if isinstance(c, crecordmod.uihunk):
            # curses hunks encapsulate the record hunk in _hunk
            c = c._hunk
        if isinstance(c, recordhunk):
            # Flip every '-' line to '+' and vice versa, in place, and swap
            # the added/removed counters to match.  Headers pass through
            # unchanged.
            for j, line in enumerate(c.hunk):
                if line.startswith("-"):
                    c.hunk[j] = "+" + c.hunk[j][1:]
                elif line.startswith("+"):
                    c.hunk[j] = "-" + c.hunk[j][1:]
            c.added, c.removed = c.removed, c.added
        newhunks.append(c)
    return newhunks
1458
1467
def parsepatch(originalchunks):
    """patch -> [] of headers -> [] of hunks

    Feed the concatenated patch text through scanpatch() and assemble the
    resulting events into header objects, each carrying its recordhunks.
    """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            # Running line counters for the current file, advanced as
            # hunks are flushed in addcontext().
            self.fromline = 0
            self.toline = 0
            self.proc = ''
            self.header = None
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            # '@@ -fromstart,fromend +tostart,toend @@ proc' range header.
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # A context run closes any pending hunk: wrap it in a
            # recordhunk and attach it to the current header.
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                               self.proc, self.before, self.hunk, context)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
                self.proc = ''
            self.context = context

        def addhunk(self, hunk):
            # The context seen just before a hunk becomes its leading
            # context ("before" lines).
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            # Flush any pending hunk, then start a fresh header.
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            self.addcontext([])
            return self.headers

        # (state -> event -> handler) table.  The handlers are stored as
        # plain (unbound) functions and invoked as f(parser_instance, data)
        # below.  A missing entry means the transition is invalid.
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
            }

    p = parser()
    fp = cStringIO.StringIO()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1542
1551
def pathtransform(path, strip, prefix):
    """Turn a path from a patch into a path suitable for the repository.

    ``strip`` leading path components are removed (as with ``patch -p``),
    then ``prefix`` (either empty or normalized with a trailing '/') is
    prepended.  Runs of slashes count as a single separator.  Returns a
    ``(stripped_components, repository_path)`` pair; leading whitespace is
    kept on the stripped part only, trailing whitespace is dropped from
    the repository path.

    Raises PatchError when the path has fewer than ``strip`` components.
    """
    if strip == 0:
        return '', prefix + path.rstrip()
    total = len(path)
    pos = 0
    remaining = strip
    while remaining:
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # treat 'a//b' like 'a/b': skip over consecutive slashes
        while pos < total - 1 and path[pos] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1580
1589
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta describing which file a plain-diff hunk targets.

    Decides between the old (afile) and new (bfile) names using the same
    heuristics as the original mpatch code, and flags creations/deletions
    detected from null ranges against /dev/null.  Raises PatchError when
    both sides are /dev/null.
    """
    a_null = afile_orig == "/dev/null"
    b_null = bfile_orig == "/dev/null"
    create = a_null and hunk.starta == 0 and hunk.lena == 0
    remove = b_null and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    a_exists = not a_null and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        b_exists = a_exists
    else:
        b_exists = not b_null and backend.exists(bfile)
    missing = not b_exists and not a_exists and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if a_exists and b_exists:
            fname = afile if isbackup else bfile
        elif a_exists:
            fname = afile

    if not fname:
        if not b_null:
            fname = afile if isbackup else bfile
        elif not a_null:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1635
1644
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))

    Unrecognized lines are yielded as ('other', line).
    """
    lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        while True:
            line = lr.readline()
            if not line:
                break
            if p(line):
                lines.append(line)
            else:
                # Non-matching line belongs to the next event: push it back.
                lr.push(line)
                break
        return lines

    while True:
        line = lr.readline()
        if not line:
            break
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                # Header runs until the next '---' or 'diff' line.
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            # A '---'/'+++' pair, if present, is folded into the header;
            # git patches for binary/rename-only changes may omit it.
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0] == ' ':
            # Context lines; '\' covers the no-newline-at-eof marker.
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1687
1696
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        # Seekable stream: remember where we are so we can rewind after
        # the metadata pre-scan.
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # Unseekable input (e.g. a pipe): slurp everything into memory
        # so that seeking back becomes possible.
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # Rewind so the normal hunk parser re-reads the same bytes.
    fp.seek(pos)
    return gitpatches
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    # emitfile: a 'file' event is pending and will be emitted together
    # with the first hunk of that file.  newfile: a file header was just
    # parsed in this loop iteration.
    emitfile = newfile = False
    # List of pending gitpatch records (reversed, consumed from the end);
    # None until the first 'diff --git' line is seen.
    gitpatches = None

    # our states
    BFILE = 1
    # context: None = unknown yet, True = context diff, False = unified.
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            # Inside a file section and looking at the start of a hunk
            # (unified '@@', context '***...', or a git binary blob).
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # First hunk of the file: announce the file with it.
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                # Reverse so we can pop() records in patch order below.
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # Flush hunk-less entries (pure renames, mode changes, ...)
            # that precede the file we are now parsing.
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            # A new file header was parsed: arm the 'file' event and
            # reset per-file hunk numbering.
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # Emit any git metadata entries that never got a hunk (e.g. pure
    # renames or mode changes at the end of the patch).
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """
    def _varintlen(buf):
        # Length in bytes of one little-endian base-128 varint at the
        # start of buf: every byte except the last has its high bit set.
        consumed = 0
        for ch in buf:
            consumed += 1
            if not (ord(ch) & 0x80):
                break
        return consumed

    # Skip the two size headers (source size, target size); only the
    # opcode stream that follows is needed to rebuild the target.
    binchunk = binchunk[_varintlen(binchunk):]
    binchunk = binchunk[_varintlen(binchunk):]

    pieces = []
    pos = 0
    end = len(binchunk)
    while pos < end:
        cmd = ord(binchunk[pos])
        pos += 1
        if cmd & 0x80:
            # Copy opcode: low flag bits select which offset/size bytes
            # follow, least significant byte first.
            offset = 0
            size = 0
            for bit, shift in ((0x01, 0), (0x02, 8), (0x04, 16), (0x08, 24)):
                if cmd & bit:
                    offset |= ord(binchunk[pos]) << shift
                    pos += 1
            for bit, shift in ((0x10, 0), (0x20, 8), (0x40, 16)):
                if cmd & bit:
                    size |= ord(binchunk[pos]) << shift
                    pos += 1
            if size == 0:
                # A zero size encodes the maximum copy length of 64k.
                size = 0x10000
            pieces.append(data[offset:offset + size])
        elif cmd != 0:
            # Literal insert: the next 'cmd' bytes are copied verbatim.
            pieces.append(binchunk[pos:pos + cmd])
            pos += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return ''.join(pieces)
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Read a patch from fp and attempt to apply it.

    Return 0 when the patch applied cleanly, -1 when any hunk was
    rejected, and 1 when it applied but with fuzz.

    With eolmode 'strict' both the patch content and the patched files
    are handled in binary mode; any other mode ignores line endings
    while patching and normalizes them according to 'eolmode'.
    """
    # Delegate to the generic driver with the stock per-file patcher.
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    # Core patch-application loop: consume iterhunks(fp) events and
    # dispatch them to per-file patcher objects and the backend.
    # Returns 0 on success, -1 on rejects, 1 on fuzz.

    if prefix:
        # Normalize the user-supplied prefix relative to the repo root
        # and make sure it ends with exactly one '/'.
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        # Strip 'strip - 1' leading components and prepend the prefix.
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # The file was rejected earlier; skip its hunks.
                continue
            ret = current_file.apply(values)
            if ret > 0:
                # Hunk applied but needed fuzz.
                err = 1
        elif state == 'file':
            if current_file:
                # Finish the previous file before switching targets.
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # Plain (non-git) patch: synthesize metadata from the
                # a/ and b/ file names.
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # Metadata-only entry: no hunk will follow, handle the
                # delete/copy/rename/mode change right here.
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    # FIXME: failing getfile has never been handled here
                    assert data is not None
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                # Could not open/prepare the target: count it as a
                # reject and keep going with the remaining files.
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # Preload copy/rename sources into the store before any of
            # them gets modified by later hunks.
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    # Run the external tool with the patch on stdin and parse its
    # progress output line by line.
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                # NOTE(review): 'pf'/'printed_file' are only bound once a
                # 'patching file' line has been seen; this assumes the
                # external tool always prints one first — confirm.
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        # Record touched files (and detect renames via 'similarity')
        # even if output parsing blew up mid-way.
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    # Apply 'patchobj' (a path or an open file object) through 'backend'.
    # Returns True when the patch applied with fuzz; raises PatchError on
    # failure. Touched file names are accumulated into 'files'.
    if files is None:
        files = set()
    if eolmode is None:
        # Fall back to the user's configured EOL handling.
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        # 'patchobj' may be a file name or an already-open file object;
        # open() raising TypeError tells the two apart.
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        # Only close the file if we opened it ourselves, then flush the
        # backend and release the temporary file store.
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """Apply <patchobj> to the working directory with the builtin patcher.

    Returns whether the patch was applied with any fuzz factor.
    """
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, prefix, files, eolmode)
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> against revision <ctx> through a repo backend.

    Same return convention as patchbackend: True when fuzz was needed.
    """
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, prefix, files, eolmode)
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' selects end-of-line handling:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf':   EOLs are ignored when patching and reset to CRLF
    - 'lf':     EOLs are ignored when patching and reset to LF
    - None:     take the user's setting, defaulting to 'strict'
    'eolmode' has no effect when an external patch program is used.

    Returns whether the patch was applied with fuzz.
    """
    # An explicitly configured ui.patch tool takes precedence over the
    # builtin implementation.
    externaltool = ui.config('ui', 'patch')
    if files is None:
        files = set()
    if externaltool:
        return _externalpatch(ui, repo, externaltool, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
def changedfiles(ui, repo, patchpath, strip=1):
    # Return the set of file names a patch would touch, without applying
    # it. Rename sources are included alongside their destinations.
    backend = fsbackend(ui, repo.root)
    fp = open(patchpath, 'rb')
    try:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    # Git patch: normalize the recorded paths the same
                    # way _applydiff does (no prefix here).
                    gp.path = pathtransform(gp.path, strip - 1, '')[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       '')
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise util.Abort(_('unsupported parser state: %s') % state)
        return changed
    finally:
        fp.close()
class GitDiffRequired(Exception):
    """Signals that producing this diff requires the git patch format."""
    pass
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    # Convenience wrapper: enable every optional feature group that
    # difffeatureopts knows about.
    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
                           git=True, whitespace=True, formatchanging=True)
# Backwards-compatible alias for diffallopts.
diffopts = diffallopts
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
    with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # Resolution order: explicit command-line opts, then a forced
        # value under HGPLAIN (when requested), then the config file.
        if opts:
            v = opts.get(key)
            if v:
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')
    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
    if formatchanging:
        # 'text' deliberately bypasses get(): it is a pure command-line
        # flag with no config counterpart.
        buildopts['text'] = opts and opts.get('text')
        buildopts['nobinary'] = get('nobinary')
        # noprefix is disabled under HGPLAIN so scripted consumers see
        # standard a/ b/ prefixes.
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**buildopts)
2148 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2157 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2149 losedatafn=None, prefix='', relroot=''):
2158 losedatafn=None, prefix='', relroot=''):
2150 '''yields diff of changes to files between two nodes, or node and
2159 '''yields diff of changes to files between two nodes, or node and
2151 working directory.
2160 working directory.
2152
2161
2153 if node1 is None, use first dirstate parent instead.
2162 if node1 is None, use first dirstate parent instead.
2154 if node2 is None, compare node1 with working directory.
2163 if node2 is None, compare node1 with working directory.
2155
2164
2156 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2165 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2157 every time some change cannot be represented with the current
2166 every time some change cannot be represented with the current
2158 patch format. Return False to upgrade to git patch format, True to
2167 patch format. Return False to upgrade to git patch format, True to
2159 accept the loss or raise an exception to abort the diff. It is
2168 accept the loss or raise an exception to abort the diff. It is
2160 called with the name of current file being diffed as 'fn'. If set
2169 called with the name of current file being diffed as 'fn'. If set
2161 to None, patches will always be upgraded to git format when
2170 to None, patches will always be upgraded to git format when
2162 necessary.
2171 necessary.
2163
2172
2164 prefix is a filename prefix that is prepended to all filenames on
2173 prefix is a filename prefix that is prepended to all filenames on
2165 display (used for subrepos).
2174 display (used for subrepos).
2166
2175
2167 relroot, if not empty, must be normalized with a trailing /. Any match
2176 relroot, if not empty, must be normalized with a trailing /. Any match
2168 patterns that fall outside it will be ignored.'''
2177 patterns that fall outside it will be ignored.'''
2169
2178
2170 if opts is None:
2179 if opts is None:
2171 opts = mdiff.defaultopts
2180 opts = mdiff.defaultopts
2172
2181
2173 if not node1 and not node2:
2182 if not node1 and not node2:
2174 node1 = repo.dirstate.p1()
2183 node1 = repo.dirstate.p1()
2175
2184
2176 def lrugetfilectx():
2185 def lrugetfilectx():
2177 cache = {}
2186 cache = {}
2178 order = collections.deque()
2187 order = collections.deque()
2179 def getfilectx(f, ctx):
2188 def getfilectx(f, ctx):
2180 fctx = ctx.filectx(f, filelog=cache.get(f))
2189 fctx = ctx.filectx(f, filelog=cache.get(f))
2181 if f not in cache:
2190 if f not in cache:
2182 if len(cache) > 20:
2191 if len(cache) > 20:
2183 del cache[order.popleft()]
2192 del cache[order.popleft()]
2184 cache[f] = fctx.filelog()
2193 cache[f] = fctx.filelog()
2185 else:
2194 else:
2186 order.remove(f)
2195 order.remove(f)
2187 order.append(f)
2196 order.append(f)
2188 return fctx
2197 return fctx
2189 return getfilectx
2198 return getfilectx
2190 getfilectx = lrugetfilectx()
2199 getfilectx = lrugetfilectx()
2191
2200
2192 ctx1 = repo[node1]
2201 ctx1 = repo[node1]
2193 ctx2 = repo[node2]
2202 ctx2 = repo[node2]
2194
2203
2195 relfiltered = False
2204 relfiltered = False
2196 if relroot != '' and match.always():
2205 if relroot != '' and match.always():
2197 # as a special case, create a new matcher with just the relroot
2206 # as a special case, create a new matcher with just the relroot
2198 pats = [relroot]
2207 pats = [relroot]
2199 match = scmutil.match(ctx2, pats, default='path')
2208 match = scmutil.match(ctx2, pats, default='path')
2200 relfiltered = True
2209 relfiltered = True
2201
2210
2202 if not changes:
2211 if not changes:
2203 changes = repo.status(ctx1, ctx2, match=match)
2212 changes = repo.status(ctx1, ctx2, match=match)
2204 modified, added, removed = changes[:3]
2213 modified, added, removed = changes[:3]
2205
2214
2206 if not modified and not added and not removed:
2215 if not modified and not added and not removed:
2207 return []
2216 return []
2208
2217
2209 if repo.ui.debugflag:
2218 if repo.ui.debugflag:
2210 hexfunc = hex
2219 hexfunc = hex
2211 else:
2220 else:
2212 hexfunc = short
2221 hexfunc = short
2213 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2222 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2214
2223
2215 copy = {}
2224 copy = {}
2216 if opts.git or opts.upgrade:
2225 if opts.git or opts.upgrade:
2217 copy = copies.pathcopies(ctx1, ctx2, match=match)
2226 copy = copies.pathcopies(ctx1, ctx2, match=match)
2218
2227
2219 if relroot is not None:
2228 if relroot is not None:
2220 if not relfiltered:
2229 if not relfiltered:
2221 # XXX this would ideally be done in the matcher, but that is
2230 # XXX this would ideally be done in the matcher, but that is
2222 # generally meant to 'or' patterns, not 'and' them. In this case we
2231 # generally meant to 'or' patterns, not 'and' them. In this case we
2223 # need to 'and' all the patterns from the matcher with relroot.
2232 # need to 'and' all the patterns from the matcher with relroot.
2224 def filterrel(l):
2233 def filterrel(l):
2225 return [f for f in l if f.startswith(relroot)]
2234 return [f for f in l if f.startswith(relroot)]
2226 modified = filterrel(modified)
2235 modified = filterrel(modified)
2227 added = filterrel(added)
2236 added = filterrel(added)
2228 removed = filterrel(removed)
2237 removed = filterrel(removed)
2229 relfiltered = True
2238 relfiltered = True
2230 # filter out copies where either side isn't inside the relative root
2239 # filter out copies where either side isn't inside the relative root
2231 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2240 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2232 if dst.startswith(relroot)
2241 if dst.startswith(relroot)
2233 and src.startswith(relroot)))
2242 and src.startswith(relroot)))
2234
2243
2235 def difffn(opts, losedata):
2244 def difffn(opts, losedata):
2236 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2245 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2237 copy, getfilectx, opts, losedata, prefix, relroot)
2246 copy, getfilectx, opts, losedata, prefix, relroot)
2238 if opts.upgrade and not opts.git:
2247 if opts.upgrade and not opts.git:
2239 try:
2248 try:
2240 def losedata(fn):
2249 def losedata(fn):
2241 if not losedatafn or not losedatafn(fn=fn):
2250 if not losedatafn or not losedatafn(fn=fn):
2242 raise GitDiffRequired
2251 raise GitDiffRequired
2243 # Buffer the whole output until we are sure it can be generated
2252 # Buffer the whole output until we are sure it can be generated
2244 return list(difffn(opts.copy(git=False), losedata))
2253 return list(difffn(opts.copy(git=False), losedata))
2245 except GitDiffRequired:
2254 except GitDiffRequired:
2246 return difffn(opts.copy(git=True), None)
2255 return difffn(opts.copy(git=True), None)
2247 else:
2256 else:
2248 return difffn(opts, None)
2257 return difffn(opts, None)
2249
2258
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # prefix -> label tables, one for extended-diff header lines and one
    # for hunk/content lines
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    inheader = False
    for chunk in func(*args, **kw):
        for i, line in enumerate(chunk.split('\n')):
            if i:
                # re-emit the newline that split() consumed
                yield ('\n', '')
            # track whether we are inside a file header block; a hunk
            # marker ('@') ends it, any non-diff-body line starts one
            if inheader:
                if line.startswith('@'):
                    inheader = False
            elif line and line[0] not in ' +-@\\':
                inheader = True
            trimmed = line
            ischange = False
            if not inheader and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                trimmed = line.rstrip()
                ischange = True

            prefixes = headprefixes if inheader else textprefixes
            for prefix, label in prefixes:
                if not trimmed.startswith(prefix):
                    continue
                if ischange:
                    # split out tab runs so they get their own label
                    for token in tabsplitter.findall(trimmed):
                        if token[0] == '\t':
                            yield (token, 'diff.tab')
                        else:
                            yield (token, label)
                else:
                    yield (trimmed, label)
                break
            else:
                # no prefix matched: emit unlabelled
                yield (line, '')
            if line != trimmed:
                yield (line[len(trimmed):], 'diff.trailingwhitespace')
2301
2310
def diffui(*args, **kw):
    '''wrap diff() so it emits 2-tuples of (output, label) for ui.write()'''
    # delegate to difflabel, which attaches color labels to raw diff output
    return difflabel(diff, *args, **kw)
2305
2314
2306 def _filepairs(ctx1, modified, added, removed, copy, opts):
2315 def _filepairs(ctx1, modified, added, removed, copy, opts):
2307 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2316 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2308 before and f2 is the the name after. For added files, f1 will be None,
2317 before and f2 is the the name after. For added files, f1 will be None,
2309 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2318 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2310 or 'rename' (the latter two only if opts.git is set).'''
2319 or 'rename' (the latter two only if opts.git is set).'''
2311 gone = set()
2320 gone = set()
2312
2321
2313 copyto = dict([(v, k) for k, v in copy.items()])
2322 copyto = dict([(v, k) for k, v in copy.items()])
2314
2323
2315 addedset, removedset = set(added), set(removed)
2324 addedset, removedset = set(added), set(removed)
2316 # Fix up added, since merged-in additions appear as
2325 # Fix up added, since merged-in additions appear as
2317 # modifications during merges
2326 # modifications during merges
2318 for f in modified:
2327 for f in modified:
2319 if f not in ctx1:
2328 if f not in ctx1:
2320 addedset.add(f)
2329 addedset.add(f)
2321
2330
2322 for f in sorted(modified + added + removed):
2331 for f in sorted(modified + added + removed):
2323 copyop = None
2332 copyop = None
2324 f1, f2 = f, f
2333 f1, f2 = f, f
2325 if f in addedset:
2334 if f in addedset:
2326 f1 = None
2335 f1 = None
2327 if f in copy:
2336 if f in copy:
2328 if opts.git:
2337 if opts.git:
2329 f1 = copy[f]
2338 f1 = copy[f]
2330 if f1 in removedset and f1 not in gone:
2339 if f1 in removedset and f1 not in gone:
2331 copyop = 'rename'
2340 copyop = 'rename'
2332 gone.add(f1)
2341 gone.add(f1)
2333 else:
2342 else:
2334 copyop = 'copy'
2343 copyop = 'copy'
2335 elif f in removedset:
2344 elif f in removedset:
2336 f2 = None
2345 f2 = None
2337 if opts.git:
2346 if opts.git:
2338 # have we already reported a copy above?
2347 # have we already reported a copy above?
2339 if (f in copyto and copyto[f] in addedset
2348 if (f in copyto and copyto[f] in addedset
2340 and copy[copyto[f]] == f):
2349 and copy[copyto[f]] == f):
2341 continue
2350 continue
2342 yield f1, f2, copyop
2351 yield f1, f2, copyop
2343
2352
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        # blob id for the git "index" header: sha1 over the git blob
        # framing "blob <len>\0" followed by the file content
        if not text:
            text = ""
        l = len(text)
        s = util.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        # --noprefix: omit the conventional a/ and b/ path prefixes
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        # plain (non-git) header line: "diff -r REV1 -r REV2 file"
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    # flag character ('l'=symlink, 'x'=executable) -> git file mode
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    # developer sanity check (devel.check-relroot / devel.all): with a
    # relative root, every incoming path must live under it
    if relroot != '' and (repo.ui.configbool('devel', 'all')
                          or repo.ui.configbool('devel', 'check-relroot')):
        for f in modified + added + removed + copy.keys() + copy.values():
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(
            ctx1, modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        flag1 = None
        flag2 = None
        if f1:
            content1 = getfilectx(f1, ctx1).data()
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            content2 = getfilectx(f2, ctx2).data()
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        # binary detection is only needed for git output or loss checks
        binary = False
        if opts.git or losedatafn:
            binary = util.binary(content1) or util.binary(content2)

        # when emitting a plain diff, let the caller decide (via
        # losedatafn) whether to upgrade to git format rather than
        # silently dropping information
        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and not content2) or
                # empty file deletion
                (not content1 and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        # display paths: strip relroot, then prepend the caller's prefix
        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else: # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        if binary and opts.git and not opts.nobinary:
            # base85 binary diff; the index header carries the blob ids
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
        else:
            text = mdiff.unidiff(content1, date1,
                                 content2, date2,
                                 path1, path2, opts=opts)
        # skip a bare one-line header with no body; a multi-line git
        # header is still emitted even without text (mode change, rename)
        if header and (text or len(header) > 1):
            yield '\n'.join(header) + '\n'
        if text:
            yield text
2454
2463
def diffstatsum(stats):
    '''fold (filename, adds, removes, isbinary) tuples into overall totals

    Returns (maxfile, maxtotal, addtotal, removetotal, binary): the widest
    filename in display columns, the largest per-file change count, the
    summed additions and removals, and whether any entry was binary.
    '''
    maxfile = 0
    maxtotal = 0
    addtotal = 0
    removetotal = 0
    binary = False
    for filename, adds, removes, isbinary in stats:
        namewidth = encoding.colwidth(filename)
        if namewidth > maxfile:
            maxfile = namewidth
        total = adds + removes
        if total > maxtotal:
            maxtotal = total
        addtotal += adds
        removetotal += removes
        binary = binary or isbinary

    return maxfile, maxtotal, addtotal, removetotal, binary
2465
2474
def diffstatdata(lines):
    '''parse diff output lines into (filename, adds, removes, isbinary)

    lines is an iterable of diff output lines.  A new entry starts at each
    "diff" header line; '+'/'-' body lines are counted, while the
    '+++ '/'--- ' file header lines are ignored.  isbinary is set when a
    'GIT binary patch' or 'Binary file' marker is seen for the entry.
    Returns the list of per-file tuples.
    '''
    # raw string: the previous non-raw literal relied on '\s' being an
    # unrecognized escape, which raises SyntaxWarning on modern Pythons
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        # flush the entry currently being accumulated, if any
        if filename:
            results.append((filename, adds, removes, isbinary))

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # set numbers to 0 anyway when starting new file
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++ '):
            adds += 1
        elif line.startswith('-') and not line.startswith('--- '):
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    # flush the final entry
    addresult()
    return results
2495
2504
def diffstat(lines, width=80, git=False):
    '''render diff output lines as a diffstat histogram, one row per file'''
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    # width of the change-count column; 'Bin' needs at least three chars
    countwidth = len(str(maxtotal))
    if hasbinary:
        countwidth = max(countwidth, 3)
    # space remaining for the +/- graph, but never under ten columns
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(count):
        if maxtotal <= graphwidth:
            return count
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(count * graphwidth // maxtotal, int(bool(count)))

    rows = []
    for filename, adds, removes, isbinary in stats:
        count = 'Bin' if isbinary else adds + removes
        padding = ' ' * (maxname - encoding.colwidth(filename))
        rows.append(' %s%s | %*s %s%s\n' %
                    (filename, padding, countwidth, count,
                     '+' * scale(adds), '-' * scale(removes)))

    if stats:
        rows.append(_(' %d files changed, %d insertions(+), '
                      '%d deletions(-)\n')
                    % (len(stats), totaladds, totalremoves))

    return ''.join(rows)
2533
2542
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''
    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            # histogram row: split the graph column off and label the
            # runs of '+' and '-' separately
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            plusrun = re.search(r'\++', graph)
            if plusrun:
                yield (plusrun.group(0), 'diffstat.inserted')
            minusrun = re.search(r'-+', graph)
            if minusrun:
                yield (minusrun.group(0), 'diffstat.deleted')
        else:
            # non-graph line (e.g. the summary): emit unlabelled
            yield (line, '')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now