extract: move 'branch' assignment where it is parsed...
Pierre-Yves David
r26551:5b8faea8 default
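This commit changes extract() in patch.py: instead of parsing the '# Branch ' header into a local variable and copying it into the result dictionary at each return point, the value is written into the 'data' dictionary at the moment it is parsed. A minimal sketch of the before/after pattern (paraphrased from the diff below, not the full function):

    # before: collect into a local, copy into the dict on return
    branch = None
    ...
    elif line.startswith("# Branch "):
        branch = line[9:]
    ...
    data['branch'] = branch

    # after: assign where it is parsed
    elif line.startswith("# Branch "):
        data['branch'] = line[9:]

One consequence, visible in the diff, is that 'branch' is now present in the returned dictionary only when a '# Branch ' header was actually seen, rather than always being set (possibly to None).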
@@ -1,2562 +1,2559 @@
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import collections
9 import collections
10 import cStringIO, email, os, errno, re, posixpath, copy
10 import cStringIO, email, os, errno, re, posixpath, copy
11 import tempfile, zlib, shutil
11 import tempfile, zlib, shutil
12
12
13 from i18n import _
13 from i18n import _
14 from node import hex, short
14 from node import hex, short
15 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
15 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
16 import pathutil
16 import pathutil
17
17
18 gitre = re.compile('diff --git a/(.*) b/(.*)')
18 gitre = re.compile('diff --git a/(.*) b/(.*)')
19 tabsplitter = re.compile(r'(\t+|[^\t]+)')
19 tabsplitter = re.compile(r'(\t+|[^\t]+)')
20
20
21 class PatchError(Exception):
21 class PatchError(Exception):
22 pass
22 pass
23
23
24
24
25 # public functions
25 # public functions
26
26
27 def split(stream):
27 def split(stream):
28 '''return an iterator of individual patches from a stream'''
28 '''return an iterator of individual patches from a stream'''
29 def isheader(line, inheader):
29 def isheader(line, inheader):
30 if inheader and line[0] in (' ', '\t'):
30 if inheader and line[0] in (' ', '\t'):
31 # continuation
31 # continuation
32 return True
32 return True
33 if line[0] in (' ', '-', '+'):
33 if line[0] in (' ', '-', '+'):
34 # diff line - don't check for header pattern in there
34 # diff line - don't check for header pattern in there
35 return False
35 return False
36 l = line.split(': ', 1)
36 l = line.split(': ', 1)
37 return len(l) == 2 and ' ' not in l[0]
37 return len(l) == 2 and ' ' not in l[0]
38
38
39 def chunk(lines):
39 def chunk(lines):
40 return cStringIO.StringIO(''.join(lines))
40 return cStringIO.StringIO(''.join(lines))
41
41
42 def hgsplit(stream, cur):
42 def hgsplit(stream, cur):
43 inheader = True
43 inheader = True
44
44
45 for line in stream:
45 for line in stream:
46 if not line.strip():
46 if not line.strip():
47 inheader = False
47 inheader = False
48 if not inheader and line.startswith('# HG changeset patch'):
48 if not inheader and line.startswith('# HG changeset patch'):
49 yield chunk(cur)
49 yield chunk(cur)
50 cur = []
50 cur = []
51 inheader = True
51 inheader = True
52
52
53 cur.append(line)
53 cur.append(line)
54
54
55 if cur:
55 if cur:
56 yield chunk(cur)
56 yield chunk(cur)
57
57
58 def mboxsplit(stream, cur):
58 def mboxsplit(stream, cur):
59 for line in stream:
59 for line in stream:
60 if line.startswith('From '):
60 if line.startswith('From '):
61 for c in split(chunk(cur[1:])):
61 for c in split(chunk(cur[1:])):
62 yield c
62 yield c
63 cur = []
63 cur = []
64
64
65 cur.append(line)
65 cur.append(line)
66
66
67 if cur:
67 if cur:
68 for c in split(chunk(cur[1:])):
68 for c in split(chunk(cur[1:])):
69 yield c
69 yield c
70
70
71 def mimesplit(stream, cur):
71 def mimesplit(stream, cur):
72 def msgfp(m):
72 def msgfp(m):
73 fp = cStringIO.StringIO()
73 fp = cStringIO.StringIO()
74 g = email.Generator.Generator(fp, mangle_from_=False)
74 g = email.Generator.Generator(fp, mangle_from_=False)
75 g.flatten(m)
75 g.flatten(m)
76 fp.seek(0)
76 fp.seek(0)
77 return fp
77 return fp
78
78
79 for line in stream:
79 for line in stream:
80 cur.append(line)
80 cur.append(line)
81 c = chunk(cur)
81 c = chunk(cur)
82
82
83 m = email.Parser.Parser().parse(c)
83 m = email.Parser.Parser().parse(c)
84 if not m.is_multipart():
84 if not m.is_multipart():
85 yield msgfp(m)
85 yield msgfp(m)
86 else:
86 else:
87 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
87 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
88 for part in m.walk():
88 for part in m.walk():
89 ct = part.get_content_type()
89 ct = part.get_content_type()
90 if ct not in ok_types:
90 if ct not in ok_types:
91 continue
91 continue
92 yield msgfp(part)
92 yield msgfp(part)
93
93
94 def headersplit(stream, cur):
94 def headersplit(stream, cur):
95 inheader = False
95 inheader = False
96
96
97 for line in stream:
97 for line in stream:
98 if not inheader and isheader(line, inheader):
98 if not inheader and isheader(line, inheader):
99 yield chunk(cur)
99 yield chunk(cur)
100 cur = []
100 cur = []
101 inheader = True
101 inheader = True
102 if inheader and not isheader(line, inheader):
102 if inheader and not isheader(line, inheader):
103 inheader = False
103 inheader = False
104
104
105 cur.append(line)
105 cur.append(line)
106
106
107 if cur:
107 if cur:
108 yield chunk(cur)
108 yield chunk(cur)
109
109
110 def remainder(cur):
110 def remainder(cur):
111 yield chunk(cur)
111 yield chunk(cur)
112
112
113 class fiter(object):
113 class fiter(object):
114 def __init__(self, fp):
114 def __init__(self, fp):
115 self.fp = fp
115 self.fp = fp
116
116
117 def __iter__(self):
117 def __iter__(self):
118 return self
118 return self
119
119
120 def next(self):
120 def next(self):
121 l = self.fp.readline()
121 l = self.fp.readline()
122 if not l:
122 if not l:
123 raise StopIteration
123 raise StopIteration
124 return l
124 return l
125
125
126 inheader = False
126 inheader = False
127 cur = []
127 cur = []
128
128
129 mimeheaders = ['content-type']
129 mimeheaders = ['content-type']
130
130
131 if not util.safehasattr(stream, 'next'):
131 if not util.safehasattr(stream, 'next'):
132 # http responses, for example, have readline but not next
132 # http responses, for example, have readline but not next
133 stream = fiter(stream)
133 stream = fiter(stream)
134
134
135 for line in stream:
135 for line in stream:
136 cur.append(line)
136 cur.append(line)
137 if line.startswith('# HG changeset patch'):
137 if line.startswith('# HG changeset patch'):
138 return hgsplit(stream, cur)
138 return hgsplit(stream, cur)
139 elif line.startswith('From '):
139 elif line.startswith('From '):
140 return mboxsplit(stream, cur)
140 return mboxsplit(stream, cur)
141 elif isheader(line, inheader):
141 elif isheader(line, inheader):
142 inheader = True
142 inheader = True
143 if line.split(':', 1)[0].lower() in mimeheaders:
143 if line.split(':', 1)[0].lower() in mimeheaders:
144 # let email parser handle this
144 # let email parser handle this
145 return mimesplit(stream, cur)
145 return mimesplit(stream, cur)
146 elif line.startswith('--- ') and inheader:
146 elif line.startswith('--- ') and inheader:
147 # No evil headers seen by diff start, split by hand
147 # No evil headers seen by diff start, split by hand
148 return headersplit(stream, cur)
148 return headersplit(stream, cur)
149 # Not enough info, keep reading
149 # Not enough info, keep reading
150
150
151 # if we are here, we have a very plain patch
151 # if we are here, we have a very plain patch
152 return remainder(cur)
152 return remainder(cur)
153
153
154 def extract(ui, fileobj):
154 def extract(ui, fileobj):
155 '''extract patch from data read from fileobj.
155 '''extract patch from data read from fileobj.
156
156
157 patch can be a normal patch or contained in an email message.
157 patch can be a normal patch or contained in an email message.
158
158
159 return a dictionary. Standard keys are:
159 return a dictionary. Standard keys are:
160 - filename,
160 - filename,
161 - message,
161 - message,
162 - user,
162 - user,
163 - date,
163 - date,
164 - branch,
164 - branch,
165 - node,
165 - node,
166 - p1,
166 - p1,
167 - p2.
167 - p2.
168 Any item can be missing from the dictionary. If filename is missing,
168 Any item can be missing from the dictionary. If filename is missing,
169 fileobj did not contain a patch. Caller must unlink filename when done.'''
169 fileobj did not contain a patch. Caller must unlink filename when done.'''
170
170
171 # attempt to detect the start of a patch
171 # attempt to detect the start of a patch
172 # (this heuristic is borrowed from quilt)
172 # (this heuristic is borrowed from quilt)
173 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
173 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
174 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
174 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
175 r'---[ \t].*?^\+\+\+[ \t]|'
175 r'---[ \t].*?^\+\+\+[ \t]|'
176 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
176 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
177
177
178 data = {}
178 data = {}
179 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
179 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
180 tmpfp = os.fdopen(fd, 'w')
180 tmpfp = os.fdopen(fd, 'w')
181 try:
181 try:
182 msg = email.Parser.Parser().parse(fileobj)
182 msg = email.Parser.Parser().parse(fileobj)
183
183
184 subject = msg['Subject']
184 subject = msg['Subject']
185 user = msg['From']
185 user = msg['From']
186 if not subject and not user:
186 if not subject and not user:
187 # Not an email, restore parsed headers if any
187 # Not an email, restore parsed headers if any
188 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
188 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
189
189
190 # should try to parse msg['Date']
190 # should try to parse msg['Date']
191 date = None
191 date = None
192 nodeid = None
192 nodeid = None
193 branch = None
194 parents = []
193 parents = []
195
194
196 if subject:
195 if subject:
197 if subject.startswith('[PATCH'):
196 if subject.startswith('[PATCH'):
198 pend = subject.find(']')
197 pend = subject.find(']')
199 if pend >= 0:
198 if pend >= 0:
200 subject = subject[pend + 1:].lstrip()
199 subject = subject[pend + 1:].lstrip()
201 subject = re.sub(r'\n[ \t]+', ' ', subject)
200 subject = re.sub(r'\n[ \t]+', ' ', subject)
202 ui.debug('Subject: %s\n' % subject)
201 ui.debug('Subject: %s\n' % subject)
203 if user:
202 if user:
204 ui.debug('From: %s\n' % user)
203 ui.debug('From: %s\n' % user)
205 diffs_seen = 0
204 diffs_seen = 0
206 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
205 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
207 message = ''
206 message = ''
208 for part in msg.walk():
207 for part in msg.walk():
209 content_type = part.get_content_type()
208 content_type = part.get_content_type()
210 ui.debug('Content-Type: %s\n' % content_type)
209 ui.debug('Content-Type: %s\n' % content_type)
211 if content_type not in ok_types:
210 if content_type not in ok_types:
212 continue
211 continue
213 payload = part.get_payload(decode=True)
212 payload = part.get_payload(decode=True)
214 m = diffre.search(payload)
213 m = diffre.search(payload)
215 if m:
214 if m:
216 hgpatch = False
215 hgpatch = False
217 hgpatchheader = False
216 hgpatchheader = False
218 ignoretext = False
217 ignoretext = False
219
218
220 ui.debug('found patch at byte %d\n' % m.start(0))
219 ui.debug('found patch at byte %d\n' % m.start(0))
221 diffs_seen += 1
220 diffs_seen += 1
222 cfp = cStringIO.StringIO()
221 cfp = cStringIO.StringIO()
223 for line in payload[:m.start(0)].splitlines():
222 for line in payload[:m.start(0)].splitlines():
224 if line.startswith('# HG changeset patch') and not hgpatch:
223 if line.startswith('# HG changeset patch') and not hgpatch:
225 ui.debug('patch generated by hg export\n')
224 ui.debug('patch generated by hg export\n')
226 hgpatch = True
225 hgpatch = True
227 hgpatchheader = True
226 hgpatchheader = True
228 # drop earlier commit message content
227 # drop earlier commit message content
229 cfp.seek(0)
228 cfp.seek(0)
230 cfp.truncate()
229 cfp.truncate()
231 subject = None
230 subject = None
232 elif hgpatchheader:
231 elif hgpatchheader:
233 if line.startswith('# User '):
232 if line.startswith('# User '):
234 user = line[7:]
233 user = line[7:]
235 ui.debug('From: %s\n' % user)
234 ui.debug('From: %s\n' % user)
236 elif line.startswith("# Date "):
235 elif line.startswith("# Date "):
237 date = line[7:]
236 date = line[7:]
238 elif line.startswith("# Branch "):
237 elif line.startswith("# Branch "):
239 branch = line[9:]
238 data['branch'] = line[9:]
240 elif line.startswith("# Node ID "):
239 elif line.startswith("# Node ID "):
241 nodeid = line[10:]
240 nodeid = line[10:]
242 elif line.startswith("# Parent "):
241 elif line.startswith("# Parent "):
243 parents.append(line[9:].lstrip())
242 parents.append(line[9:].lstrip())
244 elif not line.startswith("# "):
243 elif not line.startswith("# "):
245 hgpatchheader = False
244 hgpatchheader = False
246 elif line == '---':
245 elif line == '---':
247 ignoretext = True
246 ignoretext = True
248 if not hgpatchheader and not ignoretext:
247 if not hgpatchheader and not ignoretext:
249 cfp.write(line)
248 cfp.write(line)
250 cfp.write('\n')
249 cfp.write('\n')
251 message = cfp.getvalue()
250 message = cfp.getvalue()
252 if tmpfp:
251 if tmpfp:
253 tmpfp.write(payload)
252 tmpfp.write(payload)
254 if not payload.endswith('\n'):
253 if not payload.endswith('\n'):
255 tmpfp.write('\n')
254 tmpfp.write('\n')
256 elif not diffs_seen and message and content_type == 'text/plain':
255 elif not diffs_seen and message and content_type == 'text/plain':
257 message += '\n' + payload
256 message += '\n' + payload
258 except: # re-raises
257 except: # re-raises
259 tmpfp.close()
258 tmpfp.close()
260 os.unlink(tmpname)
259 os.unlink(tmpname)
261 raise
260 raise
262
261
263 if subject and not message.startswith(subject):
262 if subject and not message.startswith(subject):
264 message = '%s\n%s' % (subject, message)
263 message = '%s\n%s' % (subject, message)
265 data['message'] = message
264 data['message'] = message
266 tmpfp.close()
265 tmpfp.close()
267 if not diffs_seen:
266 if not diffs_seen:
268 os.unlink(tmpname)
267 os.unlink(tmpname)
269 data['user'] = user
268 data['user'] = user
270 data['date'] = date
269 data['date'] = date
271 data['branch'] = branch
272 return data
270 return data
273
271
274 if parents:
272 if parents:
275 data['p1'] = parents.pop(0)
273 data['p1'] = parents.pop(0)
276 if parents:
274 if parents:
277 data['p2'] = parents.pop(0)
275 data['p2'] = parents.pop(0)
278
276
279 data['filename'] = tmpname
277 data['filename'] = tmpname
280 data['user'] = user
278 data['user'] = user
281 data['date'] = date
279 data['date'] = date
282 data['branch'] = branch
283 data['nodeid'] = nodeid
280 data['nodeid'] = nodeid
284 return data
281 return data
285
282
286 class patchmeta(object):
283 class patchmeta(object):
287 """Patched file metadata
284 """Patched file metadata
288
285
289 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
286 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
290 or COPY. 'path' is patched file path. 'oldpath' is set to the
287 or COPY. 'path' is patched file path. 'oldpath' is set to the
291 origin file when 'op' is either COPY or RENAME, None otherwise. If
288 origin file when 'op' is either COPY or RENAME, None otherwise. If
292 file mode is changed, 'mode' is a tuple (islink, isexec) where
289 file mode is changed, 'mode' is a tuple (islink, isexec) where
293 'islink' is True if the file is a symlink and 'isexec' is True if
290 'islink' is True if the file is a symlink and 'isexec' is True if
294 the file is executable. Otherwise, 'mode' is None.
291 the file is executable. Otherwise, 'mode' is None.
295 """
292 """
296 def __init__(self, path):
293 def __init__(self, path):
297 self.path = path
294 self.path = path
298 self.oldpath = None
295 self.oldpath = None
299 self.mode = None
296 self.mode = None
300 self.op = 'MODIFY'
297 self.op = 'MODIFY'
301 self.binary = False
298 self.binary = False
302
299
303 def setmode(self, mode):
300 def setmode(self, mode):
304 islink = mode & 0o20000
301 islink = mode & 0o20000
305 isexec = mode & 0o100
302 isexec = mode & 0o100
306 self.mode = (islink, isexec)
303 self.mode = (islink, isexec)
307
304
308 def copy(self):
305 def copy(self):
309 other = patchmeta(self.path)
306 other = patchmeta(self.path)
310 other.oldpath = self.oldpath
307 other.oldpath = self.oldpath
311 other.mode = self.mode
308 other.mode = self.mode
312 other.op = self.op
309 other.op = self.op
313 other.binary = self.binary
310 other.binary = self.binary
314 return other
311 return other
315
312
316 def _ispatchinga(self, afile):
313 def _ispatchinga(self, afile):
317 if afile == '/dev/null':
314 if afile == '/dev/null':
318 return self.op == 'ADD'
315 return self.op == 'ADD'
319 return afile == 'a/' + (self.oldpath or self.path)
316 return afile == 'a/' + (self.oldpath or self.path)
320
317
321 def _ispatchingb(self, bfile):
318 def _ispatchingb(self, bfile):
322 if bfile == '/dev/null':
319 if bfile == '/dev/null':
323 return self.op == 'DELETE'
320 return self.op == 'DELETE'
324 return bfile == 'b/' + self.path
321 return bfile == 'b/' + self.path
325
322
326 def ispatching(self, afile, bfile):
323 def ispatching(self, afile, bfile):
327 return self._ispatchinga(afile) and self._ispatchingb(bfile)
324 return self._ispatchinga(afile) and self._ispatchingb(bfile)
328
325
329 def __repr__(self):
326 def __repr__(self):
330 return "<patchmeta %s %r>" % (self.op, self.path)
327 return "<patchmeta %s %r>" % (self.op, self.path)
331
328
332 def readgitpatch(lr):
329 def readgitpatch(lr):
333 """extract git-style metadata about patches from <patchname>"""
330 """extract git-style metadata about patches from <patchname>"""
334
331
335 # Filter patch for git information
332 # Filter patch for git information
336 gp = None
333 gp = None
337 gitpatches = []
334 gitpatches = []
338 for line in lr:
335 for line in lr:
339 line = line.rstrip(' \r\n')
336 line = line.rstrip(' \r\n')
340 if line.startswith('diff --git a/'):
337 if line.startswith('diff --git a/'):
341 m = gitre.match(line)
338 m = gitre.match(line)
342 if m:
339 if m:
343 if gp:
340 if gp:
344 gitpatches.append(gp)
341 gitpatches.append(gp)
345 dst = m.group(2)
342 dst = m.group(2)
346 gp = patchmeta(dst)
343 gp = patchmeta(dst)
347 elif gp:
344 elif gp:
348 if line.startswith('--- '):
345 if line.startswith('--- '):
349 gitpatches.append(gp)
346 gitpatches.append(gp)
350 gp = None
347 gp = None
351 continue
348 continue
352 if line.startswith('rename from '):
349 if line.startswith('rename from '):
353 gp.op = 'RENAME'
350 gp.op = 'RENAME'
354 gp.oldpath = line[12:]
351 gp.oldpath = line[12:]
355 elif line.startswith('rename to '):
352 elif line.startswith('rename to '):
356 gp.path = line[10:]
353 gp.path = line[10:]
357 elif line.startswith('copy from '):
354 elif line.startswith('copy from '):
358 gp.op = 'COPY'
355 gp.op = 'COPY'
359 gp.oldpath = line[10:]
356 gp.oldpath = line[10:]
360 elif line.startswith('copy to '):
357 elif line.startswith('copy to '):
361 gp.path = line[8:]
358 gp.path = line[8:]
362 elif line.startswith('deleted file'):
359 elif line.startswith('deleted file'):
363 gp.op = 'DELETE'
360 gp.op = 'DELETE'
364 elif line.startswith('new file mode '):
361 elif line.startswith('new file mode '):
365 gp.op = 'ADD'
362 gp.op = 'ADD'
366 gp.setmode(int(line[-6:], 8))
363 gp.setmode(int(line[-6:], 8))
367 elif line.startswith('new mode '):
364 elif line.startswith('new mode '):
368 gp.setmode(int(line[-6:], 8))
365 gp.setmode(int(line[-6:], 8))
369 elif line.startswith('GIT binary patch'):
366 elif line.startswith('GIT binary patch'):
370 gp.binary = True
367 gp.binary = True
371 if gp:
368 if gp:
372 gitpatches.append(gp)
369 gitpatches.append(gp)
373
370
374 return gitpatches
371 return gitpatches
375
372
376 class linereader(object):
373 class linereader(object):
377 # simple class to allow pushing lines back into the input stream
374 # simple class to allow pushing lines back into the input stream
378 def __init__(self, fp):
375 def __init__(self, fp):
379 self.fp = fp
376 self.fp = fp
380 self.buf = []
377 self.buf = []
381
378
382 def push(self, line):
379 def push(self, line):
383 if line is not None:
380 if line is not None:
384 self.buf.append(line)
381 self.buf.append(line)
385
382
386 def readline(self):
383 def readline(self):
387 if self.buf:
384 if self.buf:
388 l = self.buf[0]
385 l = self.buf[0]
389 del self.buf[0]
386 del self.buf[0]
390 return l
387 return l
391 return self.fp.readline()
388 return self.fp.readline()
392
389
393 def __iter__(self):
390 def __iter__(self):
394 while True:
391 while True:
395 l = self.readline()
392 l = self.readline()
396 if not l:
393 if not l:
397 break
394 break
398 yield l
395 yield l
399
396
400 class abstractbackend(object):
397 class abstractbackend(object):
401 def __init__(self, ui):
398 def __init__(self, ui):
402 self.ui = ui
399 self.ui = ui
403
400
404 def getfile(self, fname):
401 def getfile(self, fname):
405 """Return target file data and flags as a (data, (islink,
402 """Return target file data and flags as a (data, (islink,
406 isexec)) tuple. Data is None if file is missing/deleted.
403 isexec)) tuple. Data is None if file is missing/deleted.
407 """
404 """
408 raise NotImplementedError
405 raise NotImplementedError
409
406
410 def setfile(self, fname, data, mode, copysource):
407 def setfile(self, fname, data, mode, copysource):
411 """Write data to target file fname and set its mode. mode is a
408 """Write data to target file fname and set its mode. mode is a
412 (islink, isexec) tuple. If data is None, the file content should
409 (islink, isexec) tuple. If data is None, the file content should
413 be left unchanged. If the file is modified after being copied,
410 be left unchanged. If the file is modified after being copied,
414 copysource is set to the original file name.
411 copysource is set to the original file name.
415 """
412 """
416 raise NotImplementedError
413 raise NotImplementedError
417
414
418 def unlink(self, fname):
415 def unlink(self, fname):
419 """Unlink target file."""
416 """Unlink target file."""
420 raise NotImplementedError
417 raise NotImplementedError
421
418
422 def writerej(self, fname, failed, total, lines):
419 def writerej(self, fname, failed, total, lines):
423 """Write rejected lines for fname. total is the number of hunks
420 """Write rejected lines for fname. total is the number of hunks
424 which failed to apply and total the total number of hunks for this
421 which failed to apply and total the total number of hunks for this
425 files.
422 files.
426 """
423 """
427 pass
424 pass
428
425
429 def exists(self, fname):
426 def exists(self, fname):
430 raise NotImplementedError
427 raise NotImplementedError
431
428
432 class fsbackend(abstractbackend):
429 class fsbackend(abstractbackend):
433 def __init__(self, ui, basedir):
430 def __init__(self, ui, basedir):
434 super(fsbackend, self).__init__(ui)
431 super(fsbackend, self).__init__(ui)
435 self.opener = scmutil.opener(basedir)
432 self.opener = scmutil.opener(basedir)
436
433
437 def _join(self, f):
434 def _join(self, f):
438 return os.path.join(self.opener.base, f)
435 return os.path.join(self.opener.base, f)
439
436
440 def getfile(self, fname):
437 def getfile(self, fname):
441 if self.opener.islink(fname):
438 if self.opener.islink(fname):
442 return (self.opener.readlink(fname), (True, False))
439 return (self.opener.readlink(fname), (True, False))
443
440
444 isexec = False
441 isexec = False
445 try:
442 try:
446 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
443 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
447 except OSError as e:
444 except OSError as e:
448 if e.errno != errno.ENOENT:
445 if e.errno != errno.ENOENT:
449 raise
446 raise
450 try:
447 try:
451 return (self.opener.read(fname), (False, isexec))
448 return (self.opener.read(fname), (False, isexec))
452 except IOError as e:
449 except IOError as e:
453 if e.errno != errno.ENOENT:
450 if e.errno != errno.ENOENT:
454 raise
451 raise
455 return None, None
452 return None, None
456
453
457 def setfile(self, fname, data, mode, copysource):
454 def setfile(self, fname, data, mode, copysource):
458 islink, isexec = mode
455 islink, isexec = mode
459 if data is None:
456 if data is None:
460 self.opener.setflags(fname, islink, isexec)
457 self.opener.setflags(fname, islink, isexec)
461 return
458 return
462 if islink:
459 if islink:
463 self.opener.symlink(data, fname)
460 self.opener.symlink(data, fname)
464 else:
461 else:
465 self.opener.write(fname, data)
462 self.opener.write(fname, data)
466 if isexec:
463 if isexec:
467 self.opener.setflags(fname, False, True)
464 self.opener.setflags(fname, False, True)
468
465
469 def unlink(self, fname):
466 def unlink(self, fname):
470 self.opener.unlinkpath(fname, ignoremissing=True)
467 self.opener.unlinkpath(fname, ignoremissing=True)
471
468
472 def writerej(self, fname, failed, total, lines):
469 def writerej(self, fname, failed, total, lines):
473 fname = fname + ".rej"
470 fname = fname + ".rej"
474 self.ui.warn(
471 self.ui.warn(
475 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
472 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
476 (failed, total, fname))
473 (failed, total, fname))
477 fp = self.opener(fname, 'w')
474 fp = self.opener(fname, 'w')
478 fp.writelines(lines)
475 fp.writelines(lines)
479 fp.close()
476 fp.close()
480
477
481 def exists(self, fname):
478 def exists(self, fname):
482 return self.opener.lexists(fname)
479 return self.opener.lexists(fname)
483
480
484 class workingbackend(fsbackend):
481 class workingbackend(fsbackend):
485 def __init__(self, ui, repo, similarity):
482 def __init__(self, ui, repo, similarity):
486 super(workingbackend, self).__init__(ui, repo.root)
483 super(workingbackend, self).__init__(ui, repo.root)
487 self.repo = repo
484 self.repo = repo
488 self.similarity = similarity
485 self.similarity = similarity
489 self.removed = set()
486 self.removed = set()
490 self.changed = set()
487 self.changed = set()
491 self.copied = []
488 self.copied = []
492
489
493 def _checkknown(self, fname):
490 def _checkknown(self, fname):
494 if self.repo.dirstate[fname] == '?' and self.exists(fname):
491 if self.repo.dirstate[fname] == '?' and self.exists(fname):
495 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
492 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
496
493
497 def setfile(self, fname, data, mode, copysource):
494 def setfile(self, fname, data, mode, copysource):
498 self._checkknown(fname)
495 self._checkknown(fname)
499 super(workingbackend, self).setfile(fname, data, mode, copysource)
496 super(workingbackend, self).setfile(fname, data, mode, copysource)
500 if copysource is not None:
497 if copysource is not None:
501 self.copied.append((copysource, fname))
498 self.copied.append((copysource, fname))
502 self.changed.add(fname)
499 self.changed.add(fname)
503
500
504 def unlink(self, fname):
501 def unlink(self, fname):
505 self._checkknown(fname)
502 self._checkknown(fname)
506 super(workingbackend, self).unlink(fname)
503 super(workingbackend, self).unlink(fname)
507 self.removed.add(fname)
504 self.removed.add(fname)
508 self.changed.add(fname)
505 self.changed.add(fname)
509
506
510 def close(self):
507 def close(self):
511 wctx = self.repo[None]
508 wctx = self.repo[None]
512 changed = set(self.changed)
509 changed = set(self.changed)
513 for src, dst in self.copied:
510 for src, dst in self.copied:
514 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
511 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
515 if self.removed:
512 if self.removed:
516 wctx.forget(sorted(self.removed))
513 wctx.forget(sorted(self.removed))
517 for f in self.removed:
514 for f in self.removed:
518 if f not in self.repo.dirstate:
515 if f not in self.repo.dirstate:
519 # File was deleted and no longer belongs to the
516 # File was deleted and no longer belongs to the
520 # dirstate, it was probably marked added then
517 # dirstate, it was probably marked added then
521 # deleted, and should not be considered by
518 # deleted, and should not be considered by
522 # marktouched().
519 # marktouched().
523 changed.discard(f)
520 changed.discard(f)
524 if changed:
521 if changed:
525 scmutil.marktouched(self.repo, changed, self.similarity)
522 scmutil.marktouched(self.repo, changed, self.similarity)
526 return sorted(self.changed)
523 return sorted(self.changed)
527
524
528 class filestore(object):
525 class filestore(object):
529 def __init__(self, maxsize=None):
526 def __init__(self, maxsize=None):
530 self.opener = None
527 self.opener = None
531 self.files = {}
528 self.files = {}
532 self.created = 0
529 self.created = 0
533 self.maxsize = maxsize
530 self.maxsize = maxsize
534 if self.maxsize is None:
531 if self.maxsize is None:
535 self.maxsize = 4*(2**20)
532 self.maxsize = 4*(2**20)
536 self.size = 0
533 self.size = 0
537 self.data = {}
534 self.data = {}
538
535
539 def setfile(self, fname, data, mode, copied=None):
536 def setfile(self, fname, data, mode, copied=None):
540 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
537 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
541 self.data[fname] = (data, mode, copied)
538 self.data[fname] = (data, mode, copied)
542 self.size += len(data)
539 self.size += len(data)
543 else:
540 else:
544 if self.opener is None:
541 if self.opener is None:
545 root = tempfile.mkdtemp(prefix='hg-patch-')
542 root = tempfile.mkdtemp(prefix='hg-patch-')
546 self.opener = scmutil.opener(root)
543 self.opener = scmutil.opener(root)
547 # Avoid filename issues with these simple names
544 # Avoid filename issues with these simple names
548 fn = str(self.created)
545 fn = str(self.created)
549 self.opener.write(fn, data)
546 self.opener.write(fn, data)
550 self.created += 1
547 self.created += 1
551 self.files[fname] = (fn, mode, copied)
548 self.files[fname] = (fn, mode, copied)
552
549
553 def getfile(self, fname):
550 def getfile(self, fname):
554 if fname in self.data:
551 if fname in self.data:
555 return self.data[fname]
552 return self.data[fname]
556 if not self.opener or fname not in self.files:
553 if not self.opener or fname not in self.files:
557 return None, None, None
554 return None, None, None
558 fn, mode, copied = self.files[fname]
555 fn, mode, copied = self.files[fname]
559 return self.opener.read(fn), mode, copied
556 return self.opener.read(fn), mode, copied
560
557
561 def close(self):
558 def close(self):
562 if self.opener:
559 if self.opener:
563 shutil.rmtree(self.opener.base)
560 shutil.rmtree(self.opener.base)
564
561
565 class repobackend(abstractbackend):
562 class repobackend(abstractbackend):
566 def __init__(self, ui, repo, ctx, store):
563 def __init__(self, ui, repo, ctx, store):
567 super(repobackend, self).__init__(ui)
564 super(repobackend, self).__init__(ui)
568 self.repo = repo
565 self.repo = repo
569 self.ctx = ctx
566 self.ctx = ctx
570 self.store = store
567 self.store = store
571 self.changed = set()
568 self.changed = set()
572 self.removed = set()
569 self.removed = set()
573 self.copied = {}
570 self.copied = {}
574
571
575 def _checkknown(self, fname):
572 def _checkknown(self, fname):
576 if fname not in self.ctx:
573 if fname not in self.ctx:
577 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
574 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
578
575
579 def getfile(self, fname):
576 def getfile(self, fname):
580 try:
577 try:
581 fctx = self.ctx[fname]
578 fctx = self.ctx[fname]
582 except error.LookupError:
579 except error.LookupError:
583 return None, None
580 return None, None
584 flags = fctx.flags()
581 flags = fctx.flags()
585 return fctx.data(), ('l' in flags, 'x' in flags)
582 return fctx.data(), ('l' in flags, 'x' in flags)
586
583
587 def setfile(self, fname, data, mode, copysource):
584 def setfile(self, fname, data, mode, copysource):
588 if copysource:
585 if copysource:
589 self._checkknown(copysource)
586 self._checkknown(copysource)
590 if data is None:
587 if data is None:
591 data = self.ctx[fname].data()
588 data = self.ctx[fname].data()
592 self.store.setfile(fname, data, mode, copysource)
589 self.store.setfile(fname, data, mode, copysource)
593 self.changed.add(fname)
590 self.changed.add(fname)
594 if copysource:
591 if copysource:
595 self.copied[fname] = copysource
592 self.copied[fname] = copysource
596
593
597 def unlink(self, fname):
594 def unlink(self, fname):
598 self._checkknown(fname)
595 self._checkknown(fname)
599 self.removed.add(fname)
596 self.removed.add(fname)
600
597
601 def exists(self, fname):
598 def exists(self, fname):
602 return fname in self.ctx
599 return fname in self.ctx
603
600
604 def close(self):
601 def close(self):
605 return self.changed | self.removed
602 return self.changed | self.removed
606
603
607 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
604 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
608 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
605 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
609 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
606 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
610 eolmodes = ['strict', 'crlf', 'lf', 'auto']
607 eolmodes = ['strict', 'crlf', 'lf', 'auto']
611
608
612 class patchfile(object):
609 class patchfile(object):
613 def __init__(self, ui, gp, backend, store, eolmode='strict'):
610 def __init__(self, ui, gp, backend, store, eolmode='strict'):
614 self.fname = gp.path
611 self.fname = gp.path
615 self.eolmode = eolmode
612 self.eolmode = eolmode
616 self.eol = None
613 self.eol = None
617 self.backend = backend
614 self.backend = backend
618 self.ui = ui
615 self.ui = ui
619 self.lines = []
616 self.lines = []
620 self.exists = False
617 self.exists = False
621 self.missing = True
618 self.missing = True
622 self.mode = gp.mode
619 self.mode = gp.mode
623 self.copysource = gp.oldpath
620 self.copysource = gp.oldpath
624 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
621 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
625 self.remove = gp.op == 'DELETE'
622 self.remove = gp.op == 'DELETE'
626 if self.copysource is None:
623 if self.copysource is None:
627 data, mode = backend.getfile(self.fname)
624 data, mode = backend.getfile(self.fname)
628 else:
625 else:
629 data, mode = store.getfile(self.copysource)[:2]
626 data, mode = store.getfile(self.copysource)[:2]
630 if data is not None:
627 if data is not None:
631 self.exists = self.copysource is None or backend.exists(self.fname)
628 self.exists = self.copysource is None or backend.exists(self.fname)
632 self.missing = False
629 self.missing = False
633 if data:
630 if data:
634 self.lines = mdiff.splitnewlines(data)
631 self.lines = mdiff.splitnewlines(data)
635 if self.mode is None:
632 if self.mode is None:
636 self.mode = mode
633 self.mode = mode
637 if self.lines:
634 if self.lines:
638 # Normalize line endings
635 # Normalize line endings
639 if self.lines[0].endswith('\r\n'):
636 if self.lines[0].endswith('\r\n'):
640 self.eol = '\r\n'
637 self.eol = '\r\n'
641 elif self.lines[0].endswith('\n'):
638 elif self.lines[0].endswith('\n'):
642 self.eol = '\n'
639 self.eol = '\n'
643 if eolmode != 'strict':
640 if eolmode != 'strict':
644 nlines = []
641 nlines = []
645 for l in self.lines:
642 for l in self.lines:
646 if l.endswith('\r\n'):
643 if l.endswith('\r\n'):
647 l = l[:-2] + '\n'
644 l = l[:-2] + '\n'
648 nlines.append(l)
645 nlines.append(l)
649 self.lines = nlines
646 self.lines = nlines
650 else:
647 else:
651 if self.create:
648 if self.create:
652 self.missing = False
649 self.missing = False
653 if self.mode is None:
650 if self.mode is None:
654 self.mode = (False, False)
651 self.mode = (False, False)
655 if self.missing:
652 if self.missing:
656 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
653 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
657
654
658 self.hash = {}
655 self.hash = {}
659 self.dirty = 0
656 self.dirty = 0
660 self.offset = 0
657 self.offset = 0
661 self.skew = 0
658 self.skew = 0
662 self.rej = []
659 self.rej = []
663 self.fileprinted = False
660 self.fileprinted = False
664 self.printfile(False)
661 self.printfile(False)
665 self.hunks = 0
662 self.hunks = 0
666
663
667 def writelines(self, fname, lines, mode):
664 def writelines(self, fname, lines, mode):
668 if self.eolmode == 'auto':
665 if self.eolmode == 'auto':
669 eol = self.eol
666 eol = self.eol
670 elif self.eolmode == 'crlf':
667 elif self.eolmode == 'crlf':
671 eol = '\r\n'
668 eol = '\r\n'
672 else:
669 else:
673 eol = '\n'
670 eol = '\n'
674
671
675 if self.eolmode != 'strict' and eol and eol != '\n':
672 if self.eolmode != 'strict' and eol and eol != '\n':
676 rawlines = []
673 rawlines = []
677 for l in lines:
674 for l in lines:
678 if l and l[-1] == '\n':
675 if l and l[-1] == '\n':
679 l = l[:-1] + eol
676 l = l[:-1] + eol
680 rawlines.append(l)
677 rawlines.append(l)
681 lines = rawlines
678 lines = rawlines
682
679
683 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
680 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
684
681
685 def printfile(self, warn):
682 def printfile(self, warn):
686 if self.fileprinted:
683 if self.fileprinted:
687 return
684 return
688 if warn or self.ui.verbose:
685 if warn or self.ui.verbose:
689 self.fileprinted = True
686 self.fileprinted = True
690 s = _("patching file %s\n") % self.fname
687 s = _("patching file %s\n") % self.fname
691 if warn:
688 if warn:
692 self.ui.warn(s)
689 self.ui.warn(s)
693 else:
690 else:
694 self.ui.note(s)
691 self.ui.note(s)
695
692
696
693
697 def findlines(self, l, linenum):
694 def findlines(self, l, linenum):
698 # looks through the hash and finds candidate lines. The
695 # looks through the hash and finds candidate lines. The
699 # result is a list of line numbers sorted based on distance
696 # result is a list of line numbers sorted based on distance
700 # from linenum
697 # from linenum
701
698
702 cand = self.hash.get(l, [])
699 cand = self.hash.get(l, [])
703 if len(cand) > 1:
700 if len(cand) > 1:
704 # resort our list of potentials forward then back.
701 # resort our list of potentials forward then back.
705 cand.sort(key=lambda x: abs(x - linenum))
702 cand.sort(key=lambda x: abs(x - linenum))
706 return cand
703 return cand
707
704
708 def write_rej(self):
705 def write_rej(self):
709 # our rejects are a little different from patch(1). This always
706 # our rejects are a little different from patch(1). This always
710 # creates rejects in the same form as the original patch. A file
707 # creates rejects in the same form as the original patch. A file
711 # header is inserted so that you can run the reject through patch again
708 # header is inserted so that you can run the reject through patch again
712 # without having to type the filename.
709 # without having to type the filename.
713 if not self.rej:
710 if not self.rej:
714 return
711 return
715 base = os.path.basename(self.fname)
712 base = os.path.basename(self.fname)
716 lines = ["--- %s\n+++ %s\n" % (base, base)]
713 lines = ["--- %s\n+++ %s\n" % (base, base)]
717 for x in self.rej:
714 for x in self.rej:
718 for l in x.hunk:
715 for l in x.hunk:
719 lines.append(l)
716 lines.append(l)
720 if l[-1] != '\n':
717 if l[-1] != '\n':
721 lines.append("\n\ No newline at end of file\n")
718 lines.append("\n\ No newline at end of file\n")
722 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
719 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
723
720
724 def apply(self, h):
721 def apply(self, h):
725 if not h.complete():
722 if not h.complete():
726 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
723 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
727 (h.number, h.desc, len(h.a), h.lena, len(h.b),
724 (h.number, h.desc, len(h.a), h.lena, len(h.b),
728 h.lenb))
725 h.lenb))
729
726
730 self.hunks += 1
727 self.hunks += 1
731
728
732 if self.missing:
729 if self.missing:
733 self.rej.append(h)
730 self.rej.append(h)
734 return -1
731 return -1
735
732
736 if self.exists and self.create:
733 if self.exists and self.create:
737 if self.copysource:
734 if self.copysource:
738 self.ui.warn(_("cannot create %s: destination already "
735 self.ui.warn(_("cannot create %s: destination already "
739 "exists\n") % self.fname)
736 "exists\n") % self.fname)
740 else:
737 else:
741 self.ui.warn(_("file %s already exists\n") % self.fname)
738 self.ui.warn(_("file %s already exists\n") % self.fname)
742 self.rej.append(h)
739 self.rej.append(h)
743 return -1
740 return -1
744
741
745 if isinstance(h, binhunk):
742 if isinstance(h, binhunk):
746 if self.remove:
743 if self.remove:
747 self.backend.unlink(self.fname)
744 self.backend.unlink(self.fname)
748 else:
745 else:
749 l = h.new(self.lines)
746 l = h.new(self.lines)
750 self.lines[:] = l
747 self.lines[:] = l
751 self.offset += len(l)
748 self.offset += len(l)
752 self.dirty = True
749 self.dirty = True
753 return 0
750 return 0
754
751
755 horig = h
752 horig = h
756 if (self.eolmode in ('crlf', 'lf')
753 if (self.eolmode in ('crlf', 'lf')
757 or self.eolmode == 'auto' and self.eol):
754 or self.eolmode == 'auto' and self.eol):
758 # If new eols are going to be normalized, then normalize
755 # If new eols are going to be normalized, then normalize
759 # hunk data before patching. Otherwise, preserve input
756 # hunk data before patching. Otherwise, preserve input
760 # line-endings.
757 # line-endings.
761 h = h.getnormalized()
758 h = h.getnormalized()
762
759
763 # fast case first, no offsets, no fuzz
760 # fast case first, no offsets, no fuzz
764 old, oldstart, new, newstart = h.fuzzit(0, False)
761 old, oldstart, new, newstart = h.fuzzit(0, False)
765 oldstart += self.offset
762 oldstart += self.offset
766 orig_start = oldstart
763 orig_start = oldstart
767 # if there's skew we want to emit the "(offset %d lines)" even
764 # if there's skew we want to emit the "(offset %d lines)" even
768 # when the hunk cleanly applies at start + skew, so skip the
765 # when the hunk cleanly applies at start + skew, so skip the
769 # fast case code
766 # fast case code
770 if (self.skew == 0 and
767 if (self.skew == 0 and
771 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
768 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
772 if self.remove:
769 if self.remove:
773 self.backend.unlink(self.fname)
770 self.backend.unlink(self.fname)
774 else:
771 else:
775 self.lines[oldstart:oldstart + len(old)] = new
772 self.lines[oldstart:oldstart + len(old)] = new
776 self.offset += len(new) - len(old)
773 self.offset += len(new) - len(old)
777 self.dirty = True
774 self.dirty = True
778 return 0
775 return 0
779
776
780 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
777 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
781 self.hash = {}
778 self.hash = {}
782 for x, s in enumerate(self.lines):
779 for x, s in enumerate(self.lines):
783 self.hash.setdefault(s, []).append(x)
780 self.hash.setdefault(s, []).append(x)
784
781
785 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
782 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
786 for toponly in [True, False]:
783 for toponly in [True, False]:
787 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
784 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
788 oldstart = oldstart + self.offset + self.skew
785 oldstart = oldstart + self.offset + self.skew
789 oldstart = min(oldstart, len(self.lines))
786 oldstart = min(oldstart, len(self.lines))
790 if old:
787 if old:
791 cand = self.findlines(old[0][1:], oldstart)
788 cand = self.findlines(old[0][1:], oldstart)
792 else:
789 else:
793 # Only adding lines with no or fuzzed context, just
790 # Only adding lines with no or fuzzed context, just
794 # take the skew into account
791 # take the skew into account
795 cand = [oldstart]
792 cand = [oldstart]
796
793
797 for l in cand:
794 for l in cand:
798 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
795 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
799 self.lines[l : l + len(old)] = new
796 self.lines[l : l + len(old)] = new
800 self.offset += len(new) - len(old)
797 self.offset += len(new) - len(old)
801 self.skew = l - orig_start
798 self.skew = l - orig_start
802 self.dirty = True
799 self.dirty = True
803 offset = l - orig_start - fuzzlen
800 offset = l - orig_start - fuzzlen
804 if fuzzlen:
801 if fuzzlen:
805 msg = _("Hunk #%d succeeded at %d "
802 msg = _("Hunk #%d succeeded at %d "
806 "with fuzz %d "
803 "with fuzz %d "
807 "(offset %d lines).\n")
804 "(offset %d lines).\n")
808 self.printfile(True)
805 self.printfile(True)
809 self.ui.warn(msg %
806 self.ui.warn(msg %
810 (h.number, l + 1, fuzzlen, offset))
807 (h.number, l + 1, fuzzlen, offset))
811 else:
808 else:
812 msg = _("Hunk #%d succeeded at %d "
809 msg = _("Hunk #%d succeeded at %d "
813 "(offset %d lines).\n")
810 "(offset %d lines).\n")
814 self.ui.note(msg % (h.number, l + 1, offset))
811 self.ui.note(msg % (h.number, l + 1, offset))
815 return fuzzlen
812 return fuzzlen
816 self.printfile(True)
813 self.printfile(True)
817 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
814 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
818 self.rej.append(horig)
815 self.rej.append(horig)
819 return -1
816 return -1
820
817
821 def close(self):
818 def close(self):
822 if self.dirty:
819 if self.dirty:
823 self.writelines(self.fname, self.lines, self.mode)
820 self.writelines(self.fname, self.lines, self.mode)
824 self.write_rej()
821 self.write_rej()
825 return len(self.rej)
822 return len(self.rej)
826
823
827 class header(object):
824 class header(object):
828 """patch header
825 """patch header
829 """
826 """
830 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
827 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
831 diff_re = re.compile('diff -r .* (.*)$')
828 diff_re = re.compile('diff -r .* (.*)$')
832 allhunks_re = re.compile('(?:index|deleted file) ')
829 allhunks_re = re.compile('(?:index|deleted file) ')
833 pretty_re = re.compile('(?:new file|deleted file) ')
830 pretty_re = re.compile('(?:new file|deleted file) ')
834 special_re = re.compile('(?:index|deleted|copy|rename) ')
831 special_re = re.compile('(?:index|deleted|copy|rename) ')
835 newfile_re = re.compile('(?:new file)')
832 newfile_re = re.compile('(?:new file)')
836
833
837 def __init__(self, header):
834 def __init__(self, header):
838 self.header = header
835 self.header = header
839 self.hunks = []
836 self.hunks = []
840
837
841 def binary(self):
838 def binary(self):
842 return any(h.startswith('index ') for h in self.header)
839 return any(h.startswith('index ') for h in self.header)
843
840
844 def pretty(self, fp):
841 def pretty(self, fp):
845 for h in self.header:
842 for h in self.header:
846 if h.startswith('index '):
843 if h.startswith('index '):
847 fp.write(_('this modifies a binary file (all or nothing)\n'))
844 fp.write(_('this modifies a binary file (all or nothing)\n'))
848 break
845 break
849 if self.pretty_re.match(h):
846 if self.pretty_re.match(h):
850 fp.write(h)
847 fp.write(h)
851 if self.binary():
848 if self.binary():
852 fp.write(_('this is a binary file\n'))
849 fp.write(_('this is a binary file\n'))
853 break
850 break
854 if h.startswith('---'):
851 if h.startswith('---'):
855 fp.write(_('%d hunks, %d lines changed\n') %
852 fp.write(_('%d hunks, %d lines changed\n') %
856 (len(self.hunks),
853 (len(self.hunks),
857 sum([max(h.added, h.removed) for h in self.hunks])))
854 sum([max(h.added, h.removed) for h in self.hunks])))
858 break
855 break
859 fp.write(h)
856 fp.write(h)
860
857
861 def write(self, fp):
858 def write(self, fp):
862 fp.write(''.join(self.header))
859 fp.write(''.join(self.header))
863
860
864 def allhunks(self):
861 def allhunks(self):
865 return any(self.allhunks_re.match(h) for h in self.header)
862 return any(self.allhunks_re.match(h) for h in self.header)
866
863
867 def files(self):
864 def files(self):
868 match = self.diffgit_re.match(self.header[0])
865 match = self.diffgit_re.match(self.header[0])
869 if match:
866 if match:
870 fromfile, tofile = match.groups()
867 fromfile, tofile = match.groups()
871 if fromfile == tofile:
868 if fromfile == tofile:
872 return [fromfile]
869 return [fromfile]
873 return [fromfile, tofile]
870 return [fromfile, tofile]
874 else:
871 else:
875 return self.diff_re.match(self.header[0]).groups()
872 return self.diff_re.match(self.header[0]).groups()
876
873
877 def filename(self):
874 def filename(self):
878 return self.files()[-1]
875 return self.files()[-1]
879
876
880 def __repr__(self):
877 def __repr__(self):
881 return '<header %s>' % (' '.join(map(repr, self.files())))
878 return '<header %s>' % (' '.join(map(repr, self.files())))
882
879
883 def isnewfile(self):
880 def isnewfile(self):
884 return any(self.newfile_re.match(h) for h in self.header)
881 return any(self.newfile_re.match(h) for h in self.header)
885
882
886 def special(self):
883 def special(self):
887 # Special files are shown only at the header level and not at the hunk
884 # Special files are shown only at the header level and not at the hunk
888 # level; for example, a file that has been deleted is a special file.
885 # level; for example, a file that has been deleted is a special file.
889 # The user cannot change the content of the operation: in the case of
886 # The user cannot change the content of the operation: in the case of
890 # a deleted file they either take the deletion or leave it; they
887 # a deleted file they either take the deletion or leave it; they
891 # cannot take only part of it.
888 # cannot take only part of it.
892 # Newly added files are special if they are empty; they are not special
889 # Newly added files are special if they are empty; they are not special
893 # if they have some content, as we want to be able to change it.
890 # if they have some content, as we want to be able to change it.
894 nocontent = len(self.header) == 2
891 nocontent = len(self.header) == 2
895 emptynewfile = self.isnewfile() and nocontent
892 emptynewfile = self.isnewfile() and nocontent
896 return emptynewfile or \
893 return emptynewfile or \
897 any(self.special_re.match(h) for h in self.header)
894 any(self.special_re.match(h) for h in self.header)
898
895
899 class recordhunk(object):
896 class recordhunk(object):
900 """patch hunk
897 """patch hunk
901
898
902 XXX shouldn't we merge this with the other hunk class?
899 XXX shouldn't we merge this with the other hunk class?
903 """
900 """
904 maxcontext = 3
901 maxcontext = 3
905
902
906 def __init__(self, header, fromline, toline, proc, before, hunk, after):
903 def __init__(self, header, fromline, toline, proc, before, hunk, after):
907 def trimcontext(number, lines):
904 def trimcontext(number, lines):
908 delta = len(lines) - self.maxcontext
905 delta = len(lines) - self.maxcontext
909 if False and delta > 0:
906 if False and delta > 0:
910 return number + delta, lines[:self.maxcontext]
907 return number + delta, lines[:self.maxcontext]
911 return number, lines
908 return number, lines
912
909
913 self.header = header
910 self.header = header
914 self.fromline, self.before = trimcontext(fromline, before)
911 self.fromline, self.before = trimcontext(fromline, before)
915 self.toline, self.after = trimcontext(toline, after)
912 self.toline, self.after = trimcontext(toline, after)
916 self.proc = proc
913 self.proc = proc
917 self.hunk = hunk
914 self.hunk = hunk
918 self.added, self.removed = self.countchanges(self.hunk)
915 self.added, self.removed = self.countchanges(self.hunk)
919
916
920 def __eq__(self, v):
917 def __eq__(self, v):
921 if not isinstance(v, recordhunk):
918 if not isinstance(v, recordhunk):
922 return False
919 return False
923
920
924 return ((v.hunk == self.hunk) and
921 return ((v.hunk == self.hunk) and
925 (v.proc == self.proc) and
922 (v.proc == self.proc) and
926 (self.fromline == v.fromline) and
923 (self.fromline == v.fromline) and
927 (self.header.files() == v.header.files()))
924 (self.header.files() == v.header.files()))
928
925
929 def __hash__(self):
926 def __hash__(self):
930 return hash((tuple(self.hunk),
927 return hash((tuple(self.hunk),
931 tuple(self.header.files()),
928 tuple(self.header.files()),
932 self.fromline,
929 self.fromline,
933 self.proc))
930 self.proc))
934
931
935 def countchanges(self, hunk):
932 def countchanges(self, hunk):
936 """hunk -> (n+,n-)"""
933 """hunk -> (n+,n-)"""
937 add = len([h for h in hunk if h[0] == '+'])
934 add = len([h for h in hunk if h[0] == '+'])
938 rem = len([h for h in hunk if h[0] == '-'])
935 rem = len([h for h in hunk if h[0] == '-'])
939 return add, rem
936 return add, rem
940
937
941 def write(self, fp):
938 def write(self, fp):
942 delta = len(self.before) + len(self.after)
939 delta = len(self.before) + len(self.after)
943 if self.after and self.after[-1] == '\\ No newline at end of file\n':
940 if self.after and self.after[-1] == '\\ No newline at end of file\n':
944 delta -= 1
941 delta -= 1
945 fromlen = delta + self.removed
942 fromlen = delta + self.removed
946 tolen = delta + self.added
943 tolen = delta + self.added
947 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
944 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
948 (self.fromline, fromlen, self.toline, tolen,
945 (self.fromline, fromlen, self.toline, tolen,
949 self.proc and (' ' + self.proc)))
946 self.proc and (' ' + self.proc)))
950 fp.write(''.join(self.before + self.hunk + self.after))
947 fp.write(''.join(self.before + self.hunk + self.after))
951
948
952 pretty = write
949 pretty = write
953
950
954 def filename(self):
951 def filename(self):
955 return self.header.filename()
952 return self.header.filename()
956
953
957 def __repr__(self):
954 def __repr__(self):
958 return '<hunk %r@%d>' % (self.filename(), self.fromline)
955 return '<hunk %r@%d>' % (self.filename(), self.fromline)
959
956
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks"""
    if operation is None:
        operation = _('record')

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = _('[Ynesfdaq?]'
                      '$$ &Yes, record this change'
                      '$$ &No, skip this change'
                      '$$ &Edit this change manually'
                      '$$ &Skip remaining changes to this file'
                      '$$ Record remaining changes to this &file'
                      '$$ &Done, skip remaining changes and files'
                      '$$ Record &all changes to all remaining files'
                      '$$ &Quit, recording no changes'
                      '$$ &? (display help)')
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, t.lower()))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                        suffix=".diff", text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, "w")
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()})
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = cStringIO.StringIO()
                    for line in patchfp:
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise util.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {}        # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        msg = (_('examine changes to %s?') %
               _(' and ').join("'%s'" % f for f in h.files()))
        r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        if h.allhunks():
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = _("record this change to '%s'?") % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = _("record change %d/%d to '%s'?") % (idx, total,
                                                           chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                    skipall, msg, chunk)
            if r:
                if fixoffset:
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    return sum([h for h in applied.itervalues()
               if h[0].special() or len(h) > 1], [])
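
# Illustrative note on fixoffset above (added for clarity, not in the original
# source): hunk line numbers assume the whole file gets patched, so when a
# hunk is skipped, fixoffset accumulates (removed - added) for it and is added
# to toline of every later accepted hunk in the same file. For example,
# skipping a hunk that removes 3 lines and adds 1 shifts later hunks by +2.
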
class hunk(object):
    def __init__(self, desc, num, lr, context):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart

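# Illustrative sketch of the fuzz machinery above (assumed values, not from
# the original file): with fuzz=2, a hunk whose body starts with three
# context lines has at most two of them trimmed from the top by _fuzzit
# (and, unless toponly is set, up to two more from the bottom); fuzzit then
# shifts the reported old/new start positions by the number of trimmed top
# lines so the shortened hunk still lines up with the target file.
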
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        self.text = None
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        return self.text is not None

    def new(self, lines):
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        def getline(lr, hunk):
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text

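# Illustrative note on the 'GIT binary patch' format parsed above (added for
# clarity, not in the original source): after a 'literal <size>' or
# 'delta <size>' line, each data line starts with one character giving the
# decoded byte count of that line ('A'-'Z' -> 1-26, 'a'-'z' -> 27-52),
# followed by base85 data; the concatenated bytes are zlib-compressed. So a
# line beginning with 'M' decodes to 13 bytes, one beginning with 'z' to 52.
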
def parsefilename(str):
    # --- filename \t|space stuff
    s = str[4:].rstrip('\r\n')
    i = s.find('\t')
    if i < 0:
        i = s.find(' ')
        if i < 0:
            return s
    return s[:i]

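# Example of parsefilename() behaviour (illustrative input, not from the
# original file): '--- a/foo.c\t2015-01-01 10:00:00' -> 'a/foo.c', because
# the leading '--- ' (or '+++ ') marker is dropped and the name is cut at
# the first tab, falling back to the first space when no tab is present.
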
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = """diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch(rawpatch)
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> fp = cStringIO.StringIO()
    >>> for c in reversedhunks:
    ...     c.write(fp)
    >>> fp.seek(0)
    >>> reversedpatch = fp.read()
    >>> print reversedpatch
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -1,6 +2,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -5,3 +6,2 @@
     5
     d
    -lastline

    '''

    import crecord as crecordmod
    newhunks = []
    for c in hunks:
        if isinstance(c, crecordmod.uihunk):
            # curses hunks encapsulate the record hunk in _hunk
            c = c._hunk
        if isinstance(c, recordhunk):
            for j, line in enumerate(c.hunk):
                if line.startswith("-"):
                    c.hunk[j] = "+" + c.hunk[j][1:]
                elif line.startswith("+"):
                    c.hunk[j] = "-" + c.hunk[j][1:]
            c.added, c.removed = c.removed, c.added
        newhunks.append(c)
    return newhunks

def parsepatch(originalchunks):
    """patch -> [] of headers -> [] of hunks """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0
            self.toline = 0
            self.proc = ''
            self.header = None
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                        self.proc, self.before, self.hunk, context)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
                self.proc = ''
            self.context = context

        def addhunk(self, hunk):
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            self.addcontext([])
            return self.headers

        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
            }

    p = parser()
    fp = cStringIO.StringIO()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()

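# Illustrative trace of the state machine above for a hypothetical minimal
# patch (added for clarity, not in the original source): scanpatch() emits
#   'file'    -> parser.newfile()    (diff/---/+++ header lines)
#   'range'   -> parser.addrange()   (the @@ line)
#   'hunk'    -> parser.addhunk()    (the +/- lines)
#   'context' -> parser.addcontext() (trailing context, closes the hunk)
# and finished() flushes any pending hunk before returning the headers.
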
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform('a/b/c', 0, '')
    ('', 'a/b/c')
    >>> pathtransform('   a/b/c   ', 0, '')
    ('', '   a/b/c')
    >>> pathtransform('   a/b/c   ', 2, '')
    ('a/b/', 'c')
    >>> pathtransform('a/b/c', 0, 'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform('   a//b/c   ', 2, 'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform('a/b/c', 3, '')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    pathlen = len(path)
    i = 0
    if strip == 0:
        return '', prefix + path.rstrip()
    count = strip
    while count > 0:
        i = path.find('/', i)
        if i == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (count, strip, path))
        i += 1
        # consume '//' in the path
        while i < pathlen - 1 and path[i] == '/':
            i += 1
        count -= 1
    return path[:i].lstrip(), prefix + path[i:].rstrip()

def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp

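# Illustrative examples for makepatchmeta() (assumed hunk values, not from
# the original source): a plain unified diff with afile_orig == '/dev/null'
# and a '@@ -0,0 +1,3 @@' hunk (starta == 0, lena == 0) yields gp.op == 'ADD';
# conversely bfile_orig == '/dev/null' with '@@ -1,3 +0,0 @@' yields
# gp.op == 'DELETE'. Anything else is left as a plain modification.
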
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file',    [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk',    [hunk_lines])
    - ('range',   (-start,len, +start,len, proc))
    """
    lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        while True:
            line = lr.readline()
            if not line:
                break
            if p(line):
                lines.append(line)
            else:
                lr.push(line)
                break
        return lines

    while True:
        line = lr.readline()
        if not line:
            break
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0] == ' ':
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line

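# Illustrative event stream from scanpatch() for a hypothetical one-hunk
# patch (not from the original source):
#   ('file', [...header lines..., '--- a/f\n', '+++ b/f\n'])
#   ('range', ('1', '2', '1', '2', ''))
#   ('hunk', ['-old\n', '+new\n'])
#   ('context', [' unchanged\n'])
# 'other' is emitted for any line that matches none of the above.
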
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    fp.seek(pos)
    return gitpatches

def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())

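# Illustrative ordering note for iterhunks() (added for clarity, not in the
# original source): for git-style input the single 'git' event (copies and
# renames only) is emitted when the first 'diff --git' line is seen, before
# any 'file'/'hunk' events; git files that end up with no hunks at all
# (e.g. mode changes or pure copies) are flushed as trailing 'file' events
# with a None hunk by the loops above.
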
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """
    def deltahead(binchunk):
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    out = ""
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i]) << 16
                i += 1
            if size == 0:
                size = 0x10000
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return out

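# Worked example of the delta opcodes interpreted above (constructed bytes,
# not from the original source): after the two varint size headers are
# skipped,
#   cmd = 0x91 (0x80|0x01|0x10) reads one offset byte and one size byte,
#     e.g. offset=0x05, size=0x0a, and copies data[5:15] from the source;
#   cmd = 0x03 (high bit clear, non-zero) copies the next 3 literal bytes
#     from the delta itself;
#   cmd = 0x00 raises PatchError('unexpected delta opcode 0').
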
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)

def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):

    if prefix:
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    # FIXME: failing getfile has never been handled here
                    assert data is not None
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err

1981 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1978 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1982 similarity):
1979 similarity):
1983 """use <patcher> to apply <patchname> to the working directory.
1980 """use <patcher> to apply <patchname> to the working directory.
1984 returns whether patch was applied with fuzz factor."""
1981 returns whether patch was applied with fuzz factor."""
1985
1982
1986 fuzz = False
1983 fuzz = False
1987 args = []
1984 args = []
1988 cwd = repo.root
1985 cwd = repo.root
1989 if cwd:
1986 if cwd:
1990 args.append('-d %s' % util.shellquote(cwd))
1987 args.append('-d %s' % util.shellquote(cwd))
1991 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1988 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1992 util.shellquote(patchname)))
1989 util.shellquote(patchname)))
1993 try:
1990 try:
1994 for line in fp:
1991 for line in fp:
1995 line = line.rstrip()
1992 line = line.rstrip()
1996 ui.note(line + '\n')
1993 ui.note(line + '\n')
1997 if line.startswith('patching file '):
1994 if line.startswith('patching file '):
1998 pf = util.parsepatchoutput(line)
1995 pf = util.parsepatchoutput(line)
1999 printed_file = False
1996 printed_file = False
2000 files.add(pf)
1997 files.add(pf)
2001 elif line.find('with fuzz') >= 0:
1998 elif line.find('with fuzz') >= 0:
2002 fuzz = True
1999 fuzz = True
2003 if not printed_file:
2000 if not printed_file:
2004 ui.warn(pf + '\n')
2001 ui.warn(pf + '\n')
2005 printed_file = True
2002 printed_file = True
2006 ui.warn(line + '\n')
2003 ui.warn(line + '\n')
2007 elif line.find('saving rejects to file') >= 0:
2004 elif line.find('saving rejects to file') >= 0:
2008 ui.warn(line + '\n')
2005 ui.warn(line + '\n')
2009 elif line.find('FAILED') >= 0:
2006 elif line.find('FAILED') >= 0:
2010 if not printed_file:
2007 if not printed_file:
2011 ui.warn(pf + '\n')
2008 ui.warn(pf + '\n')
2012 printed_file = True
2009 printed_file = True
2013 ui.warn(line + '\n')
2010 ui.warn(line + '\n')
2014 finally:
2011 finally:
2015 if files:
2012 if files:
2016 scmutil.marktouched(repo, files, similarity)
2013 scmutil.marktouched(repo, files, similarity)
2017 code = fp.close()
2014 code = fp.close()
2018 if code:
2015 if code:
2019 raise PatchError(_("patch command failed: %s") %
2016 raise PatchError(_("patch command failed: %s") %
2020 util.explainexit(code)[0])
2017 util.explainexit(code)[0])
2021 return fuzz
2018 return fuzz
2022
2019
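# Editorial sketch (not part of the original module): _externalpatch() is
# normally reached through patch() further below when the user configures an
# external program. The 'gpatch' command name and the setconfig call are
# illustrative assumptions, equivalent to putting "patch = gpatch" in the
# [ui] section of an hgrc file.
def _example_external_patch(ui, repo, patchname):
    ui.setconfig('ui', 'patch', 'gpatch')
    # patch() now shells out to the external tool instead of the builtin engine
    return patch(ui, repo, patchname, strip=1)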
2023 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2020 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2024 eolmode='strict'):
2021 eolmode='strict'):
2025 if files is None:
2022 if files is None:
2026 files = set()
2023 files = set()
2027 if eolmode is None:
2024 if eolmode is None:
2028 eolmode = ui.config('patch', 'eol', 'strict')
2025 eolmode = ui.config('patch', 'eol', 'strict')
2029 if eolmode.lower() not in eolmodes:
2026 if eolmode.lower() not in eolmodes:
2030 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
2027 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
2031 eolmode = eolmode.lower()
2028 eolmode = eolmode.lower()
2032
2029
2033 store = filestore()
2030 store = filestore()
2034 try:
2031 try:
2035 fp = open(patchobj, 'rb')
2032 fp = open(patchobj, 'rb')
2036 except TypeError:
2033 except TypeError:
2037 fp = patchobj
2034 fp = patchobj
2038 try:
2035 try:
2039 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2036 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2040 eolmode=eolmode)
2037 eolmode=eolmode)
2041 finally:
2038 finally:
2042 if fp != patchobj:
2039 if fp != patchobj:
2043 fp.close()
2040 fp.close()
2044 files.update(backend.close())
2041 files.update(backend.close())
2045 store.close()
2042 store.close()
2046 if ret < 0:
2043 if ret < 0:
2047 raise PatchError(_('patch failed to apply'))
2044 raise PatchError(_('patch failed to apply'))
2048 return ret > 0
2045 return ret > 0
2049
2046
2050 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2047 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2051 eolmode='strict', similarity=0):
2048 eolmode='strict', similarity=0):
2052 """use builtin patch to apply <patchobj> to the working directory.
2049 """use builtin patch to apply <patchobj> to the working directory.
2053 returns whether patch was applied with fuzz factor."""
2050 returns whether patch was applied with fuzz factor."""
2054 backend = workingbackend(ui, repo, similarity)
2051 backend = workingbackend(ui, repo, similarity)
2055 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2052 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2056
2053
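# Editorial sketch (not part of the original module): patchbackend() falls back
# to treating 'patchobj' as a file-like object when open() raises TypeError, so
# an in-memory patch can be applied without writing it to disk first.
def _example_patch_from_string(ui, repo, patchtext):
    fp = cStringIO.StringIO(patchtext)
    # returns True if any hunk applied with fuzz, raises PatchError on failure
    return internalpatch(ui, repo, fp, strip=1)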
2057 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2054 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2058 eolmode='strict'):
2055 eolmode='strict'):
2059 backend = repobackend(ui, repo, ctx, store)
2056 backend = repobackend(ui, repo, ctx, store)
2060 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2057 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2061
2058
2062 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2059 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2063 similarity=0):
2060 similarity=0):
2064 """Apply <patchname> to the working directory.
2061 """Apply <patchname> to the working directory.
2065
2062
2066 'eolmode' specifies how end of lines should be handled. It can be:
2063 'eolmode' specifies how end of lines should be handled. It can be:
2067 - 'strict': inputs are read in binary mode, EOLs are preserved
2064 - 'strict': inputs are read in binary mode, EOLs are preserved
2068 - 'crlf': EOLs are ignored when patching and reset to CRLF
2065 - 'crlf': EOLs are ignored when patching and reset to CRLF
2069 - 'lf': EOLs are ignored when patching and reset to LF
2066 - 'lf': EOLs are ignored when patching and reset to LF
2070 - None: get it from user settings, default to 'strict'
2067 - None: get it from user settings, default to 'strict'
2071 'eolmode' is ignored when using an external patcher program.
2068 'eolmode' is ignored when using an external patcher program.
2072
2069
2073 Returns whether patch was applied with fuzz factor.
2070 Returns whether patch was applied with fuzz factor.
2074 """
2071 """
2075 patcher = ui.config('ui', 'patch')
2072 patcher = ui.config('ui', 'patch')
2076 if files is None:
2073 if files is None:
2077 files = set()
2074 files = set()
2078 if patcher:
2075 if patcher:
2079 return _externalpatch(ui, repo, patcher, patchname, strip,
2076 return _externalpatch(ui, repo, patcher, patchname, strip,
2080 files, similarity)
2077 files, similarity)
2081 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2078 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2082 similarity)
2079 similarity)
2083
2080
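# Editorial sketch (not part of the original module): applying a patch file to
# the working directory through the public entry point. 'fix.patch' is a
# placeholder name; eolmode='lf' renormalises line endings as documented above.
def _example_apply_patch(ui, repo):
    files = set()
    fuzz = patch(ui, repo, 'fix.patch', strip=1, files=files, eolmode='lf')
    ui.write('patched %d files (fuzz: %s)\n' % (len(files), fuzz))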
2084 def changedfiles(ui, repo, patchpath, strip=1):
2081 def changedfiles(ui, repo, patchpath, strip=1):
2085 backend = fsbackend(ui, repo.root)
2082 backend = fsbackend(ui, repo.root)
2086 fp = open(patchpath, 'rb')
2083 fp = open(patchpath, 'rb')
2087 try:
2084 try:
2088 changed = set()
2085 changed = set()
2089 for state, values in iterhunks(fp):
2086 for state, values in iterhunks(fp):
2090 if state == 'file':
2087 if state == 'file':
2091 afile, bfile, first_hunk, gp = values
2088 afile, bfile, first_hunk, gp = values
2092 if gp:
2089 if gp:
2093 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2090 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2094 if gp.oldpath:
2091 if gp.oldpath:
2095 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2092 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2096 else:
2093 else:
2097 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2094 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2098 '')
2095 '')
2099 changed.add(gp.path)
2096 changed.add(gp.path)
2100 if gp.op == 'RENAME':
2097 if gp.op == 'RENAME':
2101 changed.add(gp.oldpath)
2098 changed.add(gp.oldpath)
2102 elif state not in ('hunk', 'git'):
2099 elif state not in ('hunk', 'git'):
2103 raise util.Abort(_('unsupported parser state: %s') % state)
2100 raise util.Abort(_('unsupported parser state: %s') % state)
2104 return changed
2101 return changed
2105 finally:
2102 finally:
2106 fp.close()
2103 fp.close()
2107
2104
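# Editorial sketch (not part of the original module): previewing which files a
# patch would touch without applying it; 'pending.patch' is a placeholder path.
def _example_touched_files(ui, repo):
    for f in sorted(changedfiles(ui, repo, 'pending.patch', strip=1)):
        ui.write('%s\n' % f)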
2108 class GitDiffRequired(Exception):
2105 class GitDiffRequired(Exception):
2109 pass
2106 pass
2110
2107
2111 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2108 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2112 '''return diffopts with all features supported and parsed'''
2109 '''return diffopts with all features supported and parsed'''
2113 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2110 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2114 git=True, whitespace=True, formatchanging=True)
2111 git=True, whitespace=True, formatchanging=True)
2115
2112
2116 diffopts = diffallopts
2113 diffopts = diffallopts
2117
2114
2118 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2115 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2119 whitespace=False, formatchanging=False):
2116 whitespace=False, formatchanging=False):
2120 '''return diffopts with only opted-in features parsed
2117 '''return diffopts with only opted-in features parsed
2121
2118
2122 Features:
2119 Features:
2123 - git: git-style diffs
2120 - git: git-style diffs
2124 - whitespace: whitespace options like ignoreblanklines and ignorews
2121 - whitespace: whitespace options like ignoreblanklines and ignorews
2125 - formatchanging: options that will likely break or cause correctness issues
2122 - formatchanging: options that will likely break or cause correctness issues
2126 with most diff parsers
2123 with most diff parsers
2127 '''
2124 '''
2128 def get(key, name=None, getter=ui.configbool, forceplain=None):
2125 def get(key, name=None, getter=ui.configbool, forceplain=None):
2129 if opts:
2126 if opts:
2130 v = opts.get(key)
2127 v = opts.get(key)
2131 if v:
2128 if v:
2132 return v
2129 return v
2133 if forceplain is not None and ui.plain():
2130 if forceplain is not None and ui.plain():
2134 return forceplain
2131 return forceplain
2135 return getter(section, name or key, None, untrusted=untrusted)
2132 return getter(section, name or key, None, untrusted=untrusted)
2136
2133
2137 # core options, expected to be understood by every diff parser
2134 # core options, expected to be understood by every diff parser
2138 buildopts = {
2135 buildopts = {
2139 'nodates': get('nodates'),
2136 'nodates': get('nodates'),
2140 'showfunc': get('show_function', 'showfunc'),
2137 'showfunc': get('show_function', 'showfunc'),
2141 'context': get('unified', getter=ui.config),
2138 'context': get('unified', getter=ui.config),
2142 }
2139 }
2143
2140
2144 if git:
2141 if git:
2145 buildopts['git'] = get('git')
2142 buildopts['git'] = get('git')
2146 if whitespace:
2143 if whitespace:
2147 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2144 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2148 buildopts['ignorewsamount'] = get('ignore_space_change',
2145 buildopts['ignorewsamount'] = get('ignore_space_change',
2149 'ignorewsamount')
2146 'ignorewsamount')
2150 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2147 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2151 'ignoreblanklines')
2148 'ignoreblanklines')
2152 if formatchanging:
2149 if formatchanging:
2153 buildopts['text'] = opts and opts.get('text')
2150 buildopts['text'] = opts and opts.get('text')
2154 buildopts['nobinary'] = get('nobinary')
2151 buildopts['nobinary'] = get('nobinary')
2155 buildopts['noprefix'] = get('noprefix', forceplain=False)
2152 buildopts['noprefix'] = get('noprefix', forceplain=False)
2156
2153
2157 return mdiff.diffopts(**buildopts)
2154 return mdiff.diffopts(**buildopts)
2158
2155
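# Editorial sketch (not part of the original module): building diff options.
# diffallopts() parses every feature group; difffeatureopts() lets a caller opt
# in to only the groups it can handle, here git-style headers only.
def _example_build_diffopts(ui):
    everything = diffallopts(ui, opts={'git': True})
    gitonly = difffeatureopts(ui, git=True)
    return everything, gitonly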
2159 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2156 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2160 losedatafn=None, prefix='', relroot=''):
2157 losedatafn=None, prefix='', relroot=''):
2161 '''yields diff of changes to files between two nodes, or node and
2158 '''yields diff of changes to files between two nodes, or node and
2162 working directory.
2159 working directory.
2163
2160
2164 if node1 is None, use first dirstate parent instead.
2161 if node1 is None, use first dirstate parent instead.
2165 if node2 is None, compare node1 with working directory.
2162 if node2 is None, compare node1 with working directory.
2166
2163
2167 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2164 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2168 every time some change cannot be represented with the current
2165 every time some change cannot be represented with the current
2169 patch format. Return False to upgrade to git patch format, True to
2166 patch format. Return False to upgrade to git patch format, True to
2170 accept the loss or raise an exception to abort the diff. It is
2167 accept the loss or raise an exception to abort the diff. It is
2171 called with the name of current file being diffed as 'fn'. If set
2168 called with the name of current file being diffed as 'fn'. If set
2172 to None, patches will always be upgraded to git format when
2169 to None, patches will always be upgraded to git format when
2173 necessary.
2170 necessary.
2174
2171
2175 prefix is a filename prefix that is prepended to all filenames on
2172 prefix is a filename prefix that is prepended to all filenames on
2176 display (used for subrepos).
2173 display (used for subrepos).
2177
2174
2178 relroot, if not empty, must be normalized with a trailing /. Any match
2175 relroot, if not empty, must be normalized with a trailing /. Any match
2179 patterns that fall outside it will be ignored.'''
2176 patterns that fall outside it will be ignored.'''
2180
2177
2181 if opts is None:
2178 if opts is None:
2182 opts = mdiff.defaultopts
2179 opts = mdiff.defaultopts
2183
2180
2184 if not node1 and not node2:
2181 if not node1 and not node2:
2185 node1 = repo.dirstate.p1()
2182 node1 = repo.dirstate.p1()
2186
2183
2187 def lrugetfilectx():
2184 def lrugetfilectx():
2188 cache = {}
2185 cache = {}
2189 order = collections.deque()
2186 order = collections.deque()
2190 def getfilectx(f, ctx):
2187 def getfilectx(f, ctx):
2191 fctx = ctx.filectx(f, filelog=cache.get(f))
2188 fctx = ctx.filectx(f, filelog=cache.get(f))
2192 if f not in cache:
2189 if f not in cache:
2193 if len(cache) > 20:
2190 if len(cache) > 20:
2194 del cache[order.popleft()]
2191 del cache[order.popleft()]
2195 cache[f] = fctx.filelog()
2192 cache[f] = fctx.filelog()
2196 else:
2193 else:
2197 order.remove(f)
2194 order.remove(f)
2198 order.append(f)
2195 order.append(f)
2199 return fctx
2196 return fctx
2200 return getfilectx
2197 return getfilectx
2201 getfilectx = lrugetfilectx()
2198 getfilectx = lrugetfilectx()
2202
2199
2203 ctx1 = repo[node1]
2200 ctx1 = repo[node1]
2204 ctx2 = repo[node2]
2201 ctx2 = repo[node2]
2205
2202
2206 relfiltered = False
2203 relfiltered = False
2207 if relroot != '' and match.always():
2204 if relroot != '' and match.always():
2208 # as a special case, create a new matcher with just the relroot
2205 # as a special case, create a new matcher with just the relroot
2209 pats = [relroot]
2206 pats = [relroot]
2210 match = scmutil.match(ctx2, pats, default='path')
2207 match = scmutil.match(ctx2, pats, default='path')
2211 relfiltered = True
2208 relfiltered = True
2212
2209
2213 if not changes:
2210 if not changes:
2214 changes = repo.status(ctx1, ctx2, match=match)
2211 changes = repo.status(ctx1, ctx2, match=match)
2215 modified, added, removed = changes[:3]
2212 modified, added, removed = changes[:3]
2216
2213
2217 if not modified and not added and not removed:
2214 if not modified and not added and not removed:
2218 return []
2215 return []
2219
2216
2220 if repo.ui.debugflag:
2217 if repo.ui.debugflag:
2221 hexfunc = hex
2218 hexfunc = hex
2222 else:
2219 else:
2223 hexfunc = short
2220 hexfunc = short
2224 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2221 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2225
2222
2226 copy = {}
2223 copy = {}
2227 if opts.git or opts.upgrade:
2224 if opts.git or opts.upgrade:
2228 copy = copies.pathcopies(ctx1, ctx2, match=match)
2225 copy = copies.pathcopies(ctx1, ctx2, match=match)
2229
2226
2230 if relroot is not None:
2227 if relroot is not None:
2231 if not relfiltered:
2228 if not relfiltered:
2232 # XXX this would ideally be done in the matcher, but that is
2229 # XXX this would ideally be done in the matcher, but that is
2233 # generally meant to 'or' patterns, not 'and' them. In this case we
2230 # generally meant to 'or' patterns, not 'and' them. In this case we
2234 # need to 'and' all the patterns from the matcher with relroot.
2231 # need to 'and' all the patterns from the matcher with relroot.
2235 def filterrel(l):
2232 def filterrel(l):
2236 return [f for f in l if f.startswith(relroot)]
2233 return [f for f in l if f.startswith(relroot)]
2237 modified = filterrel(modified)
2234 modified = filterrel(modified)
2238 added = filterrel(added)
2235 added = filterrel(added)
2239 removed = filterrel(removed)
2236 removed = filterrel(removed)
2240 relfiltered = True
2237 relfiltered = True
2241 # filter out copies where either side isn't inside the relative root
2238 # filter out copies where either side isn't inside the relative root
2242 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2239 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2243 if dst.startswith(relroot)
2240 if dst.startswith(relroot)
2244 and src.startswith(relroot)))
2241 and src.startswith(relroot)))
2245
2242
2246 def difffn(opts, losedata):
2243 def difffn(opts, losedata):
2247 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2244 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2248 copy, getfilectx, opts, losedata, prefix, relroot)
2245 copy, getfilectx, opts, losedata, prefix, relroot)
2249 if opts.upgrade and not opts.git:
2246 if opts.upgrade and not opts.git:
2250 try:
2247 try:
2251 def losedata(fn):
2248 def losedata(fn):
2252 if not losedatafn or not losedatafn(fn=fn):
2249 if not losedatafn or not losedatafn(fn=fn):
2253 raise GitDiffRequired
2250 raise GitDiffRequired
2254 # Buffer the whole output until we are sure it can be generated
2251 # Buffer the whole output until we are sure it can be generated
2255 return list(difffn(opts.copy(git=False), losedata))
2252 return list(difffn(opts.copy(git=False), losedata))
2256 except GitDiffRequired:
2253 except GitDiffRequired:
2257 return difffn(opts.copy(git=True), None)
2254 return difffn(opts.copy(git=True), None)
2258 else:
2255 else:
2259 return difffn(opts, None)
2256 return difffn(opts, None)
2260
2257
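# Editorial sketch (not part of the original module): rendering the last
# committed change as a single text blob. Assumes a localrepo object; repo['.']
# is the working directory parent and p1() its first parent.
def _example_diff_last_commit(ui, repo):
    ctx = repo['.']
    chunks = diff(repo, ctx.p1().node(), ctx.node(), opts=diffallopts(ui))
    return ''.join(chunks)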
2261 def difflabel(func, *args, **kw):
2258 def difflabel(func, *args, **kw):
2262 '''yields 2-tuples of (output, label) based on the output of func()'''
2259 '''yields 2-tuples of (output, label) based on the output of func()'''
2263 headprefixes = [('diff', 'diff.diffline'),
2260 headprefixes = [('diff', 'diff.diffline'),
2264 ('copy', 'diff.extended'),
2261 ('copy', 'diff.extended'),
2265 ('rename', 'diff.extended'),
2262 ('rename', 'diff.extended'),
2266 ('old', 'diff.extended'),
2263 ('old', 'diff.extended'),
2267 ('new', 'diff.extended'),
2264 ('new', 'diff.extended'),
2268 ('deleted', 'diff.extended'),
2265 ('deleted', 'diff.extended'),
2269 ('---', 'diff.file_a'),
2266 ('---', 'diff.file_a'),
2270 ('+++', 'diff.file_b')]
2267 ('+++', 'diff.file_b')]
2271 textprefixes = [('@', 'diff.hunk'),
2268 textprefixes = [('@', 'diff.hunk'),
2272 ('-', 'diff.deleted'),
2269 ('-', 'diff.deleted'),
2273 ('+', 'diff.inserted')]
2270 ('+', 'diff.inserted')]
2274 head = False
2271 head = False
2275 for chunk in func(*args, **kw):
2272 for chunk in func(*args, **kw):
2276 lines = chunk.split('\n')
2273 lines = chunk.split('\n')
2277 for i, line in enumerate(lines):
2274 for i, line in enumerate(lines):
2278 if i != 0:
2275 if i != 0:
2279 yield ('\n', '')
2276 yield ('\n', '')
2280 if head:
2277 if head:
2281 if line.startswith('@'):
2278 if line.startswith('@'):
2282 head = False
2279 head = False
2283 else:
2280 else:
2284 if line and line[0] not in ' +-@\\':
2281 if line and line[0] not in ' +-@\\':
2285 head = True
2282 head = True
2286 stripline = line
2283 stripline = line
2287 diffline = False
2284 diffline = False
2288 if not head and line and line[0] in '+-':
2285 if not head and line and line[0] in '+-':
2289 # highlight tabs and trailing whitespace, but only in
2286 # highlight tabs and trailing whitespace, but only in
2290 # changed lines
2287 # changed lines
2291 stripline = line.rstrip()
2288 stripline = line.rstrip()
2292 diffline = True
2289 diffline = True
2293
2290
2294 prefixes = textprefixes
2291 prefixes = textprefixes
2295 if head:
2292 if head:
2296 prefixes = headprefixes
2293 prefixes = headprefixes
2297 for prefix, label in prefixes:
2294 for prefix, label in prefixes:
2298 if stripline.startswith(prefix):
2295 if stripline.startswith(prefix):
2299 if diffline:
2296 if diffline:
2300 for token in tabsplitter.findall(stripline):
2297 for token in tabsplitter.findall(stripline):
2301 if '\t' == token[0]:
2298 if '\t' == token[0]:
2302 yield (token, 'diff.tab')
2299 yield (token, 'diff.tab')
2303 else:
2300 else:
2304 yield (token, label)
2301 yield (token, label)
2305 else:
2302 else:
2306 yield (stripline, label)
2303 yield (stripline, label)
2307 break
2304 break
2308 else:
2305 else:
2309 yield (line, '')
2306 yield (line, '')
2310 if line != stripline:
2307 if line != stripline:
2311 yield (line[len(stripline):], 'diff.trailingwhitespace')
2308 yield (line[len(stripline):], 'diff.trailingwhitespace')
2312
2309
2313 def diffui(*args, **kw):
2310 def diffui(*args, **kw):
2314 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2311 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2315 return difflabel(diff, *args, **kw)
2312 return difflabel(diff, *args, **kw)
2316
2313
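# Editorial sketch (not part of the original module): writing a labelled diff
# of the working directory against its parent so ui.write() can apply colours.
def _example_write_colored_diff(ui, repo):
    for chunk, label in diffui(repo, opts=diffallopts(ui)):
        ui.write(chunk, label=label)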
2317 def _filepairs(ctx1, modified, added, removed, copy, opts):
2314 def _filepairs(ctx1, modified, added, removed, copy, opts):
2318 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2315 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2319 before and f2 is the name after. For added files, f1 will be None,
2316 before and f2 is the name after. For added files, f1 will be None,
2320 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2317 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2321 or 'rename' (the latter two only if opts.git is set).'''
2318 or 'rename' (the latter two only if opts.git is set).'''
2322 gone = set()
2319 gone = set()
2323
2320
2324 copyto = dict([(v, k) for k, v in copy.items()])
2321 copyto = dict([(v, k) for k, v in copy.items()])
2325
2322
2326 addedset, removedset = set(added), set(removed)
2323 addedset, removedset = set(added), set(removed)
2327 # Fix up added, since merged-in additions appear as
2324 # Fix up added, since merged-in additions appear as
2328 # modifications during merges
2325 # modifications during merges
2329 for f in modified:
2326 for f in modified:
2330 if f not in ctx1:
2327 if f not in ctx1:
2331 addedset.add(f)
2328 addedset.add(f)
2332
2329
2333 for f in sorted(modified + added + removed):
2330 for f in sorted(modified + added + removed):
2334 copyop = None
2331 copyop = None
2335 f1, f2 = f, f
2332 f1, f2 = f, f
2336 if f in addedset:
2333 if f in addedset:
2337 f1 = None
2334 f1 = None
2338 if f in copy:
2335 if f in copy:
2339 if opts.git:
2336 if opts.git:
2340 f1 = copy[f]
2337 f1 = copy[f]
2341 if f1 in removedset and f1 not in gone:
2338 if f1 in removedset and f1 not in gone:
2342 copyop = 'rename'
2339 copyop = 'rename'
2343 gone.add(f1)
2340 gone.add(f1)
2344 else:
2341 else:
2345 copyop = 'copy'
2342 copyop = 'copy'
2346 elif f in removedset:
2343 elif f in removedset:
2347 f2 = None
2344 f2 = None
2348 if opts.git:
2345 if opts.git:
2349 # have we already reported a copy above?
2346 # have we already reported a copy above?
2350 if (f in copyto and copyto[f] in addedset
2347 if (f in copyto and copyto[f] in addedset
2351 and copy[copyto[f]] == f):
2348 and copy[copyto[f]] == f):
2352 continue
2349 continue
2353 yield f1, f2, copyop
2350 yield f1, f2, copyop
2354
2351
2355 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2352 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2356 copy, getfilectx, opts, losedatafn, prefix, relroot):
2353 copy, getfilectx, opts, losedatafn, prefix, relroot):
2357 '''given input data, generate a diff and yield it in blocks
2354 '''given input data, generate a diff and yield it in blocks
2358
2355
2359 If generating a diff would lose data like flags or binary data and
2356 If generating a diff would lose data like flags or binary data and
2360 losedatafn is not None, it will be called.
2357 losedatafn is not None, it will be called.
2361
2358
2362 relroot is removed and prefix is added to every path in the diff output.
2359 relroot is removed and prefix is added to every path in the diff output.
2363
2360
2364 If relroot is not empty, this function expects every path in modified,
2361 If relroot is not empty, this function expects every path in modified,
2365 added, removed and copy to start with it.'''
2362 added, removed and copy to start with it.'''
2366
2363
2367 def gitindex(text):
2364 def gitindex(text):
2368 if not text:
2365 if not text:
2369 text = ""
2366 text = ""
2370 l = len(text)
2367 l = len(text)
2371 s = util.sha1('blob %d\0' % l)
2368 s = util.sha1('blob %d\0' % l)
2372 s.update(text)
2369 s.update(text)
2373 return s.hexdigest()
2370 return s.hexdigest()
2374
2371
2375 if opts.noprefix:
2372 if opts.noprefix:
2376 aprefix = bprefix = ''
2373 aprefix = bprefix = ''
2377 else:
2374 else:
2378 aprefix = 'a/'
2375 aprefix = 'a/'
2379 bprefix = 'b/'
2376 bprefix = 'b/'
2380
2377
2381 def diffline(f, revs):
2378 def diffline(f, revs):
2382 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2379 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2383 return 'diff %s %s' % (revinfo, f)
2380 return 'diff %s %s' % (revinfo, f)
2384
2381
2385 date1 = util.datestr(ctx1.date())
2382 date1 = util.datestr(ctx1.date())
2386 date2 = util.datestr(ctx2.date())
2383 date2 = util.datestr(ctx2.date())
2387
2384
2388 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2385 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2389
2386
2390 if relroot != '' and (repo.ui.configbool('devel', 'all')
2387 if relroot != '' and (repo.ui.configbool('devel', 'all')
2391 or repo.ui.configbool('devel', 'check-relroot')):
2388 or repo.ui.configbool('devel', 'check-relroot')):
2392 for f in modified + added + removed + copy.keys() + copy.values():
2389 for f in modified + added + removed + copy.keys() + copy.values():
2393 if f is not None and not f.startswith(relroot):
2390 if f is not None and not f.startswith(relroot):
2394 raise AssertionError(
2391 raise AssertionError(
2395 "file %s doesn't start with relroot %s" % (f, relroot))
2392 "file %s doesn't start with relroot %s" % (f, relroot))
2396
2393
2397 for f1, f2, copyop in _filepairs(
2394 for f1, f2, copyop in _filepairs(
2398 ctx1, modified, added, removed, copy, opts):
2395 ctx1, modified, added, removed, copy, opts):
2399 content1 = None
2396 content1 = None
2400 content2 = None
2397 content2 = None
2401 flag1 = None
2398 flag1 = None
2402 flag2 = None
2399 flag2 = None
2403 if f1:
2400 if f1:
2404 content1 = getfilectx(f1, ctx1).data()
2401 content1 = getfilectx(f1, ctx1).data()
2405 if opts.git or losedatafn:
2402 if opts.git or losedatafn:
2406 flag1 = ctx1.flags(f1)
2403 flag1 = ctx1.flags(f1)
2407 if f2:
2404 if f2:
2408 content2 = getfilectx(f2, ctx2).data()
2405 content2 = getfilectx(f2, ctx2).data()
2409 if opts.git or losedatafn:
2406 if opts.git or losedatafn:
2410 flag2 = ctx2.flags(f2)
2407 flag2 = ctx2.flags(f2)
2411 binary = False
2408 binary = False
2412 if opts.git or losedatafn:
2409 if opts.git or losedatafn:
2413 binary = util.binary(content1) or util.binary(content2)
2410 binary = util.binary(content1) or util.binary(content2)
2414
2411
2415 if losedatafn and not opts.git:
2412 if losedatafn and not opts.git:
2416 if (binary or
2413 if (binary or
2417 # copy/rename
2414 # copy/rename
2418 f2 in copy or
2415 f2 in copy or
2419 # empty file creation
2416 # empty file creation
2420 (not f1 and not content2) or
2417 (not f1 and not content2) or
2421 # empty file deletion
2418 # empty file deletion
2422 (not content1 and not f2) or
2419 (not content1 and not f2) or
2423 # create with flags
2420 # create with flags
2424 (not f1 and flag2) or
2421 (not f1 and flag2) or
2425 # change flags
2422 # change flags
2426 (f1 and f2 and flag1 != flag2)):
2423 (f1 and f2 and flag1 != flag2)):
2427 losedatafn(f2 or f1)
2424 losedatafn(f2 or f1)
2428
2425
2429 path1 = f1 or f2
2426 path1 = f1 or f2
2430 path2 = f2 or f1
2427 path2 = f2 or f1
2431 path1 = posixpath.join(prefix, path1[len(relroot):])
2428 path1 = posixpath.join(prefix, path1[len(relroot):])
2432 path2 = posixpath.join(prefix, path2[len(relroot):])
2429 path2 = posixpath.join(prefix, path2[len(relroot):])
2433 header = []
2430 header = []
2434 if opts.git:
2431 if opts.git:
2435 header.append('diff --git %s%s %s%s' %
2432 header.append('diff --git %s%s %s%s' %
2436 (aprefix, path1, bprefix, path2))
2433 (aprefix, path1, bprefix, path2))
2437 if not f1: # added
2434 if not f1: # added
2438 header.append('new file mode %s' % gitmode[flag2])
2435 header.append('new file mode %s' % gitmode[flag2])
2439 elif not f2: # removed
2436 elif not f2: # removed
2440 header.append('deleted file mode %s' % gitmode[flag1])
2437 header.append('deleted file mode %s' % gitmode[flag1])
2441 else: # modified/copied/renamed
2438 else: # modified/copied/renamed
2442 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2439 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2443 if mode1 != mode2:
2440 if mode1 != mode2:
2444 header.append('old mode %s' % mode1)
2441 header.append('old mode %s' % mode1)
2445 header.append('new mode %s' % mode2)
2442 header.append('new mode %s' % mode2)
2446 if copyop is not None:
2443 if copyop is not None:
2447 header.append('%s from %s' % (copyop, path1))
2444 header.append('%s from %s' % (copyop, path1))
2448 header.append('%s to %s' % (copyop, path2))
2445 header.append('%s to %s' % (copyop, path2))
2449 elif revs and not repo.ui.quiet:
2446 elif revs and not repo.ui.quiet:
2450 header.append(diffline(path1, revs))
2447 header.append(diffline(path1, revs))
2451
2448
2452 if binary and opts.git and not opts.nobinary:
2449 if binary and opts.git and not opts.nobinary:
2453 text = mdiff.b85diff(content1, content2)
2450 text = mdiff.b85diff(content1, content2)
2454 if text:
2451 if text:
2455 header.append('index %s..%s' %
2452 header.append('index %s..%s' %
2456 (gitindex(content1), gitindex(content2)))
2453 (gitindex(content1), gitindex(content2)))
2457 else:
2454 else:
2458 text = mdiff.unidiff(content1, date1,
2455 text = mdiff.unidiff(content1, date1,
2459 content2, date2,
2456 content2, date2,
2460 path1, path2, opts=opts)
2457 path1, path2, opts=opts)
2461 if header and (text or len(header) > 1):
2458 if header and (text or len(header) > 1):
2462 yield '\n'.join(header) + '\n'
2459 yield '\n'.join(header) + '\n'
2463 if text:
2460 if text:
2464 yield text
2461 yield text
2465
2462
2466 def diffstatsum(stats):
2463 def diffstatsum(stats):
2467 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2464 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2468 for f, a, r, b in stats:
2465 for f, a, r, b in stats:
2469 maxfile = max(maxfile, encoding.colwidth(f))
2466 maxfile = max(maxfile, encoding.colwidth(f))
2470 maxtotal = max(maxtotal, a + r)
2467 maxtotal = max(maxtotal, a + r)
2471 addtotal += a
2468 addtotal += a
2472 removetotal += r
2469 removetotal += r
2473 binary = binary or b
2470 binary = binary or b
2474
2471
2475 return maxfile, maxtotal, addtotal, removetotal, binary
2472 return maxfile, maxtotal, addtotal, removetotal, binary
2476
2473
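# Editorial sketch (not part of the original module): diffstatsum() folds
# per-file (filename, adds, removes, isbinary) tuples into the totals used for
# diffstat layout; with the sample data below it returns (5, 4, 3, 1, True).
def _example_diffstatsum():
    stats = [('a.txt', 3, 1, False), ('b.bin', 0, 0, True)]
    return diffstatsum(stats)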
2477 def diffstatdata(lines):
2474 def diffstatdata(lines):
2478 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2475 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2479
2476
2480 results = []
2477 results = []
2481 filename, adds, removes, isbinary = None, 0, 0, False
2478 filename, adds, removes, isbinary = None, 0, 0, False
2482
2479
2483 def addresult():
2480 def addresult():
2484 if filename:
2481 if filename:
2485 results.append((filename, adds, removes, isbinary))
2482 results.append((filename, adds, removes, isbinary))
2486
2483
2487 for line in lines:
2484 for line in lines:
2488 if line.startswith('diff'):
2485 if line.startswith('diff'):
2489 addresult()
2486 addresult()
2490 # set numbers to 0 anyway when starting new file
2487 # set numbers to 0 anyway when starting new file
2491 adds, removes, isbinary = 0, 0, False
2488 adds, removes, isbinary = 0, 0, False
2492 if line.startswith('diff --git a/'):
2489 if line.startswith('diff --git a/'):
2493 filename = gitre.search(line).group(2)
2490 filename = gitre.search(line).group(2)
2494 elif line.startswith('diff -r'):
2491 elif line.startswith('diff -r'):
2495 # format: "diff -r ... -r ... filename"
2492 # format: "diff -r ... -r ... filename"
2496 filename = diffre.search(line).group(1)
2493 filename = diffre.search(line).group(1)
2497 elif line.startswith('+') and not line.startswith('+++ '):
2494 elif line.startswith('+') and not line.startswith('+++ '):
2498 adds += 1
2495 adds += 1
2499 elif line.startswith('-') and not line.startswith('--- '):
2496 elif line.startswith('-') and not line.startswith('--- '):
2500 removes += 1
2497 removes += 1
2501 elif (line.startswith('GIT binary patch') or
2498 elif (line.startswith('GIT binary patch') or
2502 line.startswith('Binary file')):
2499 line.startswith('Binary file')):
2503 isbinary = True
2500 isbinary = True
2504 addresult()
2501 addresult()
2505 return results
2502 return results
2506
2503
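# Editorial sketch (not part of the original module): diffstatdata() parses raw
# diff text into per-file counts; this minimal git-style diff yields
# [('foo.py', 1, 1, False)].
def _example_diffstatdata():
    lines = ['diff --git a/foo.py b/foo.py\n',
             '--- a/foo.py\n',
             '+++ b/foo.py\n',
             '@@ -1,1 +1,1 @@\n',
             '-old line\n',
             '+new line\n']
    return diffstatdata(lines)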
2507 def diffstat(lines, width=80, git=False):
2504 def diffstat(lines, width=80, git=False):
2508 output = []
2505 output = []
2509 stats = diffstatdata(lines)
2506 stats = diffstatdata(lines)
2510 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2507 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2511
2508
2512 countwidth = len(str(maxtotal))
2509 countwidth = len(str(maxtotal))
2513 if hasbinary and countwidth < 3:
2510 if hasbinary and countwidth < 3:
2514 countwidth = 3
2511 countwidth = 3
2515 graphwidth = width - countwidth - maxname - 6
2512 graphwidth = width - countwidth - maxname - 6
2516 if graphwidth < 10:
2513 if graphwidth < 10:
2517 graphwidth = 10
2514 graphwidth = 10
2518
2515
2519 def scale(i):
2516 def scale(i):
2520 if maxtotal <= graphwidth:
2517 if maxtotal <= graphwidth:
2521 return i
2518 return i
2522 # If diffstat runs out of room it doesn't print anything,
2519 # If diffstat runs out of room it doesn't print anything,
2523 # which isn't very useful, so always print at least one + or -
2520 # which isn't very useful, so always print at least one + or -
2524 # if there were at least some changes.
2521 # if there were at least some changes.
2525 return max(i * graphwidth // maxtotal, int(bool(i)))
2522 return max(i * graphwidth // maxtotal, int(bool(i)))
2526
2523
2527 for filename, adds, removes, isbinary in stats:
2524 for filename, adds, removes, isbinary in stats:
2528 if isbinary:
2525 if isbinary:
2529 count = 'Bin'
2526 count = 'Bin'
2530 else:
2527 else:
2531 count = adds + removes
2528 count = adds + removes
2532 pluses = '+' * scale(adds)
2529 pluses = '+' * scale(adds)
2533 minuses = '-' * scale(removes)
2530 minuses = '-' * scale(removes)
2534 output.append(' %s%s | %*s %s%s\n' %
2531 output.append(' %s%s | %*s %s%s\n' %
2535 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2532 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2536 countwidth, count, pluses, minuses))
2533 countwidth, count, pluses, minuses))
2537
2534
2538 if stats:
2535 if stats:
2539 output.append(_(' %d files changed, %d insertions(+), '
2536 output.append(_(' %d files changed, %d insertions(+), '
2540 '%d deletions(-)\n')
2537 '%d deletions(-)\n')
2541 % (len(stats), totaladds, totalremoves))
2538 % (len(stats), totaladds, totalremoves))
2542
2539
2543 return ''.join(output)
2540 return ''.join(output)
2544
2541
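# Editorial sketch (not part of the original module): diffstat() renders the
# histogram for the same kind of input; for this single-file sample the result
# is roughly ' foo.py | 2 +-' followed by the '1 files changed, ...' summary.
def _example_diffstat(ui):
    lines = ['diff --git a/foo.py b/foo.py\n',
             '-old line\n',
             '+new line\n']
    ui.write(diffstat(lines, width=60))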
2545 def diffstatui(*args, **kw):
2542 def diffstatui(*args, **kw):
2546 '''like diffstat(), but yields 2-tuples of (output, label) for
2543 '''like diffstat(), but yields 2-tuples of (output, label) for
2547 ui.write()
2544 ui.write()
2548 '''
2545 '''
2549
2546
2550 for line in diffstat(*args, **kw).splitlines():
2547 for line in diffstat(*args, **kw).splitlines():
2551 if line and line[-1] in '+-':
2548 if line and line[-1] in '+-':
2552 name, graph = line.rsplit(' ', 1)
2549 name, graph = line.rsplit(' ', 1)
2553 yield (name + ' ', '')
2550 yield (name + ' ', '')
2554 m = re.search(r'\++', graph)
2551 m = re.search(r'\++', graph)
2555 if m:
2552 if m:
2556 yield (m.group(0), 'diffstat.inserted')
2553 yield (m.group(0), 'diffstat.inserted')
2557 m = re.search(r'-+', graph)
2554 m = re.search(r'-+', graph)
2558 if m:
2555 if m:
2559 yield (m.group(0), 'diffstat.deleted')
2556 yield (m.group(0), 'diffstat.deleted')
2560 else:
2557 else:
2561 yield (line, '')
2558 yield (line, '')
2562 yield ('\n', '')
2559 yield ('\n', '')