extract: directly assign parent to data dictionary...
Pierre-Yves David
r26550:72782f60 default
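The change is confined to extract(): rather than initializing local p1/p2 variables and copying them into the result dictionary just before returning, the parsed parent revisions are now popped directly into data['p1'] and data['p2']. A visible consequence of the new form is that the keys are only present when a corresponding "# Parent" header was found, whereas the old code always set them (to None when absent).

Below is a minimal sketch of the "assign directly into the dict" pattern the new code uses; assign_parents is a hypothetical helper written for illustration only and is not part of mercurial/patch.py:

    # Hypothetical illustration of the pattern used by the new extract() code;
    # not the real Mercurial implementation.
    def assign_parents(data, parents):
        """Pop up to two parent node ids from 'parents' into 'data'."""
        if parents:
            data['p1'] = parents.pop(0)
            if parents:
                data['p2'] = parents.pop(0)
        return data

    print(assign_parents({}, ['aaa111', 'bbb222']))  # p1 and p2 both set
    print(assign_parents({}, []))                    # {} -- keys omitted when no parents were parsed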
@@ -1,2565 +1,2562 @@
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import collections
9 import collections
10 import cStringIO, email, os, errno, re, posixpath, copy
10 import cStringIO, email, os, errno, re, posixpath, copy
11 import tempfile, zlib, shutil
11 import tempfile, zlib, shutil
12
12
13 from i18n import _
13 from i18n import _
14 from node import hex, short
14 from node import hex, short
15 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
15 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
16 import pathutil
16 import pathutil
17
17
18 gitre = re.compile('diff --git a/(.*) b/(.*)')
18 gitre = re.compile('diff --git a/(.*) b/(.*)')
19 tabsplitter = re.compile(r'(\t+|[^\t]+)')
19 tabsplitter = re.compile(r'(\t+|[^\t]+)')
20
20
21 class PatchError(Exception):
21 class PatchError(Exception):
22 pass
22 pass
23
23
24
24
25 # public functions
25 # public functions
26
26
27 def split(stream):
27 def split(stream):
28 '''return an iterator of individual patches from a stream'''
28 '''return an iterator of individual patches from a stream'''
29 def isheader(line, inheader):
29 def isheader(line, inheader):
30 if inheader and line[0] in (' ', '\t'):
30 if inheader and line[0] in (' ', '\t'):
31 # continuation
31 # continuation
32 return True
32 return True
33 if line[0] in (' ', '-', '+'):
33 if line[0] in (' ', '-', '+'):
34 # diff line - don't check for header pattern in there
34 # diff line - don't check for header pattern in there
35 return False
35 return False
36 l = line.split(': ', 1)
36 l = line.split(': ', 1)
37 return len(l) == 2 and ' ' not in l[0]
37 return len(l) == 2 and ' ' not in l[0]
38
38
39 def chunk(lines):
39 def chunk(lines):
40 return cStringIO.StringIO(''.join(lines))
40 return cStringIO.StringIO(''.join(lines))
41
41
42 def hgsplit(stream, cur):
42 def hgsplit(stream, cur):
43 inheader = True
43 inheader = True
44
44
45 for line in stream:
45 for line in stream:
46 if not line.strip():
46 if not line.strip():
47 inheader = False
47 inheader = False
48 if not inheader and line.startswith('# HG changeset patch'):
48 if not inheader and line.startswith('# HG changeset patch'):
49 yield chunk(cur)
49 yield chunk(cur)
50 cur = []
50 cur = []
51 inheader = True
51 inheader = True
52
52
53 cur.append(line)
53 cur.append(line)
54
54
55 if cur:
55 if cur:
56 yield chunk(cur)
56 yield chunk(cur)
57
57
58 def mboxsplit(stream, cur):
58 def mboxsplit(stream, cur):
59 for line in stream:
59 for line in stream:
60 if line.startswith('From '):
60 if line.startswith('From '):
61 for c in split(chunk(cur[1:])):
61 for c in split(chunk(cur[1:])):
62 yield c
62 yield c
63 cur = []
63 cur = []
64
64
65 cur.append(line)
65 cur.append(line)
66
66
67 if cur:
67 if cur:
68 for c in split(chunk(cur[1:])):
68 for c in split(chunk(cur[1:])):
69 yield c
69 yield c
70
70
71 def mimesplit(stream, cur):
71 def mimesplit(stream, cur):
72 def msgfp(m):
72 def msgfp(m):
73 fp = cStringIO.StringIO()
73 fp = cStringIO.StringIO()
74 g = email.Generator.Generator(fp, mangle_from_=False)
74 g = email.Generator.Generator(fp, mangle_from_=False)
75 g.flatten(m)
75 g.flatten(m)
76 fp.seek(0)
76 fp.seek(0)
77 return fp
77 return fp
78
78
79 for line in stream:
79 for line in stream:
80 cur.append(line)
80 cur.append(line)
81 c = chunk(cur)
81 c = chunk(cur)
82
82
83 m = email.Parser.Parser().parse(c)
83 m = email.Parser.Parser().parse(c)
84 if not m.is_multipart():
84 if not m.is_multipart():
85 yield msgfp(m)
85 yield msgfp(m)
86 else:
86 else:
87 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
87 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
88 for part in m.walk():
88 for part in m.walk():
89 ct = part.get_content_type()
89 ct = part.get_content_type()
90 if ct not in ok_types:
90 if ct not in ok_types:
91 continue
91 continue
92 yield msgfp(part)
92 yield msgfp(part)
93
93
94 def headersplit(stream, cur):
94 def headersplit(stream, cur):
95 inheader = False
95 inheader = False
96
96
97 for line in stream:
97 for line in stream:
98 if not inheader and isheader(line, inheader):
98 if not inheader and isheader(line, inheader):
99 yield chunk(cur)
99 yield chunk(cur)
100 cur = []
100 cur = []
101 inheader = True
101 inheader = True
102 if inheader and not isheader(line, inheader):
102 if inheader and not isheader(line, inheader):
103 inheader = False
103 inheader = False
104
104
105 cur.append(line)
105 cur.append(line)
106
106
107 if cur:
107 if cur:
108 yield chunk(cur)
108 yield chunk(cur)
109
109
110 def remainder(cur):
110 def remainder(cur):
111 yield chunk(cur)
111 yield chunk(cur)
112
112
113 class fiter(object):
113 class fiter(object):
114 def __init__(self, fp):
114 def __init__(self, fp):
115 self.fp = fp
115 self.fp = fp
116
116
117 def __iter__(self):
117 def __iter__(self):
118 return self
118 return self
119
119
120 def next(self):
120 def next(self):
121 l = self.fp.readline()
121 l = self.fp.readline()
122 if not l:
122 if not l:
123 raise StopIteration
123 raise StopIteration
124 return l
124 return l
125
125
126 inheader = False
126 inheader = False
127 cur = []
127 cur = []
128
128
129 mimeheaders = ['content-type']
129 mimeheaders = ['content-type']
130
130
131 if not util.safehasattr(stream, 'next'):
131 if not util.safehasattr(stream, 'next'):
132 # http responses, for example, have readline but not next
132 # http responses, for example, have readline but not next
133 stream = fiter(stream)
133 stream = fiter(stream)
134
134
135 for line in stream:
135 for line in stream:
136 cur.append(line)
136 cur.append(line)
137 if line.startswith('# HG changeset patch'):
137 if line.startswith('# HG changeset patch'):
138 return hgsplit(stream, cur)
138 return hgsplit(stream, cur)
139 elif line.startswith('From '):
139 elif line.startswith('From '):
140 return mboxsplit(stream, cur)
140 return mboxsplit(stream, cur)
141 elif isheader(line, inheader):
141 elif isheader(line, inheader):
142 inheader = True
142 inheader = True
143 if line.split(':', 1)[0].lower() in mimeheaders:
143 if line.split(':', 1)[0].lower() in mimeheaders:
144 # let email parser handle this
144 # let email parser handle this
145 return mimesplit(stream, cur)
145 return mimesplit(stream, cur)
146 elif line.startswith('--- ') and inheader:
146 elif line.startswith('--- ') and inheader:
147 # No evil headers seen by diff start, split by hand
147 # No evil headers seen by diff start, split by hand
148 return headersplit(stream, cur)
148 return headersplit(stream, cur)
149 # Not enough info, keep reading
149 # Not enough info, keep reading
150
150
151 # if we are here, we have a very plain patch
151 # if we are here, we have a very plain patch
152 return remainder(cur)
152 return remainder(cur)
153
153
154 def extract(ui, fileobj):
154 def extract(ui, fileobj):
155 '''extract patch from data read from fileobj.
155 '''extract patch from data read from fileobj.
156
156
157 patch can be a normal patch or contained in an email message.
157 patch can be a normal patch or contained in an email message.
158
158
159 return a dictionnary. Standard keys are:
159 return a dictionnary. Standard keys are:
160 - filename,
160 - filename,
161 - message,
161 - message,
162 - user,
162 - user,
163 - date,
163 - date,
164 - branch,
164 - branch,
165 - node,
165 - node,
166 - p1,
166 - p1,
167 - p2.
167 - p2.
168 Any item can be missing from the dictionary. If filename is mising,
168 Any item can be missing from the dictionary. If filename is mising,
169 fileobj did not contain a patch. Caller must unlink filename when done.'''
169 fileobj did not contain a patch. Caller must unlink filename when done.'''
170
170
171 # attempt to detect the start of a patch
171 # attempt to detect the start of a patch
172 # (this heuristic is borrowed from quilt)
172 # (this heuristic is borrowed from quilt)
173 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
173 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
174 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
174 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
175 r'---[ \t].*?^\+\+\+[ \t]|'
175 r'---[ \t].*?^\+\+\+[ \t]|'
176 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
176 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
177
177
178 data = {}
178 data = {}
179 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
179 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
180 tmpfp = os.fdopen(fd, 'w')
180 tmpfp = os.fdopen(fd, 'w')
181 try:
181 try:
182 msg = email.Parser.Parser().parse(fileobj)
182 msg = email.Parser.Parser().parse(fileobj)
183
183
184 subject = msg['Subject']
184 subject = msg['Subject']
185 user = msg['From']
185 user = msg['From']
186 if not subject and not user:
186 if not subject and not user:
187 # Not an email, restore parsed headers if any
187 # Not an email, restore parsed headers if any
188 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
188 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
189
189
190 # should try to parse msg['Date']
190 # should try to parse msg['Date']
191 date = None
191 date = None
192 nodeid = None
192 nodeid = None
193 branch = None
193 branch = None
194 parents = []
194 parents = []
195
195
196 if subject:
196 if subject:
197 if subject.startswith('[PATCH'):
197 if subject.startswith('[PATCH'):
198 pend = subject.find(']')
198 pend = subject.find(']')
199 if pend >= 0:
199 if pend >= 0:
200 subject = subject[pend + 1:].lstrip()
200 subject = subject[pend + 1:].lstrip()
201 subject = re.sub(r'\n[ \t]+', ' ', subject)
201 subject = re.sub(r'\n[ \t]+', ' ', subject)
202 ui.debug('Subject: %s\n' % subject)
202 ui.debug('Subject: %s\n' % subject)
203 if user:
203 if user:
204 ui.debug('From: %s\n' % user)
204 ui.debug('From: %s\n' % user)
205 diffs_seen = 0
205 diffs_seen = 0
206 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
206 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
207 message = ''
207 message = ''
208 for part in msg.walk():
208 for part in msg.walk():
209 content_type = part.get_content_type()
209 content_type = part.get_content_type()
210 ui.debug('Content-Type: %s\n' % content_type)
210 ui.debug('Content-Type: %s\n' % content_type)
211 if content_type not in ok_types:
211 if content_type not in ok_types:
212 continue
212 continue
213 payload = part.get_payload(decode=True)
213 payload = part.get_payload(decode=True)
214 m = diffre.search(payload)
214 m = diffre.search(payload)
215 if m:
215 if m:
216 hgpatch = False
216 hgpatch = False
217 hgpatchheader = False
217 hgpatchheader = False
218 ignoretext = False
218 ignoretext = False
219
219
220 ui.debug('found patch at byte %d\n' % m.start(0))
220 ui.debug('found patch at byte %d\n' % m.start(0))
221 diffs_seen += 1
221 diffs_seen += 1
222 cfp = cStringIO.StringIO()
222 cfp = cStringIO.StringIO()
223 for line in payload[:m.start(0)].splitlines():
223 for line in payload[:m.start(0)].splitlines():
224 if line.startswith('# HG changeset patch') and not hgpatch:
224 if line.startswith('# HG changeset patch') and not hgpatch:
225 ui.debug('patch generated by hg export\n')
225 ui.debug('patch generated by hg export\n')
226 hgpatch = True
226 hgpatch = True
227 hgpatchheader = True
227 hgpatchheader = True
228 # drop earlier commit message content
228 # drop earlier commit message content
229 cfp.seek(0)
229 cfp.seek(0)
230 cfp.truncate()
230 cfp.truncate()
231 subject = None
231 subject = None
232 elif hgpatchheader:
232 elif hgpatchheader:
233 if line.startswith('# User '):
233 if line.startswith('# User '):
234 user = line[7:]
234 user = line[7:]
235 ui.debug('From: %s\n' % user)
235 ui.debug('From: %s\n' % user)
236 elif line.startswith("# Date "):
236 elif line.startswith("# Date "):
237 date = line[7:]
237 date = line[7:]
238 elif line.startswith("# Branch "):
238 elif line.startswith("# Branch "):
239 branch = line[9:]
239 branch = line[9:]
240 elif line.startswith("# Node ID "):
240 elif line.startswith("# Node ID "):
241 nodeid = line[10:]
241 nodeid = line[10:]
242 elif line.startswith("# Parent "):
242 elif line.startswith("# Parent "):
243 parents.append(line[9:].lstrip())
243 parents.append(line[9:].lstrip())
244 elif not line.startswith("# "):
244 elif not line.startswith("# "):
245 hgpatchheader = False
245 hgpatchheader = False
246 elif line == '---':
246 elif line == '---':
247 ignoretext = True
247 ignoretext = True
248 if not hgpatchheader and not ignoretext:
248 if not hgpatchheader and not ignoretext:
249 cfp.write(line)
249 cfp.write(line)
250 cfp.write('\n')
250 cfp.write('\n')
251 message = cfp.getvalue()
251 message = cfp.getvalue()
252 if tmpfp:
252 if tmpfp:
253 tmpfp.write(payload)
253 tmpfp.write(payload)
254 if not payload.endswith('\n'):
254 if not payload.endswith('\n'):
255 tmpfp.write('\n')
255 tmpfp.write('\n')
256 elif not diffs_seen and message and content_type == 'text/plain':
256 elif not diffs_seen and message and content_type == 'text/plain':
257 message += '\n' + payload
257 message += '\n' + payload
258 except: # re-raises
258 except: # re-raises
259 tmpfp.close()
259 tmpfp.close()
260 os.unlink(tmpname)
260 os.unlink(tmpname)
261 raise
261 raise
262
262
263 if subject and not message.startswith(subject):
263 if subject and not message.startswith(subject):
264 message = '%s\n%s' % (subject, message)
264 message = '%s\n%s' % (subject, message)
265 data['message'] = message
265 data['message'] = message
266 tmpfp.close()
266 tmpfp.close()
267 if not diffs_seen:
267 if not diffs_seen:
268 os.unlink(tmpname)
268 os.unlink(tmpname)
269 data['user'] = user
269 data['user'] = user
270 data['date'] = date
270 data['date'] = date
271 data['branch'] = branch
271 data['branch'] = branch
272 return data
272 return data
273
273
- 274 p1 = p2 = None
275 if parents:
274 if parents:
- 276 p1 = parents.pop(0)
+ 275 data['p1'] = parents.pop(0)
277 if parents:
276 if parents:
- 278 p2 = parents.pop(0)
+ 277 data['p2'] = parents.pop(0)
279
278
280 data['filename'] = tmpname
279 data['filename'] = tmpname
281 data['user'] = user
280 data['user'] = user
282 data['date'] = date
281 data['date'] = date
283 data['branch'] = branch
282 data['branch'] = branch
284 data['nodeid'] = nodeid
283 data['nodeid'] = nodeid
- 285 data['p1'] = p1
- 286 data['p2'] = p2
287 return data
284 return data
288
285
289 class patchmeta(object):
286 class patchmeta(object):
290 """Patched file metadata
287 """Patched file metadata
291
288
292 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
289 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
293 or COPY. 'path' is patched file path. 'oldpath' is set to the
290 or COPY. 'path' is patched file path. 'oldpath' is set to the
294 origin file when 'op' is either COPY or RENAME, None otherwise. If
291 origin file when 'op' is either COPY or RENAME, None otherwise. If
295 file mode is changed, 'mode' is a tuple (islink, isexec) where
292 file mode is changed, 'mode' is a tuple (islink, isexec) where
296 'islink' is True if the file is a symlink and 'isexec' is True if
293 'islink' is True if the file is a symlink and 'isexec' is True if
297 the file is executable. Otherwise, 'mode' is None.
294 the file is executable. Otherwise, 'mode' is None.
298 """
295 """
299 def __init__(self, path):
296 def __init__(self, path):
300 self.path = path
297 self.path = path
301 self.oldpath = None
298 self.oldpath = None
302 self.mode = None
299 self.mode = None
303 self.op = 'MODIFY'
300 self.op = 'MODIFY'
304 self.binary = False
301 self.binary = False
305
302
306 def setmode(self, mode):
303 def setmode(self, mode):
307 islink = mode & 0o20000
304 islink = mode & 0o20000
308 isexec = mode & 0o100
305 isexec = mode & 0o100
309 self.mode = (islink, isexec)
306 self.mode = (islink, isexec)
310
307
311 def copy(self):
308 def copy(self):
312 other = patchmeta(self.path)
309 other = patchmeta(self.path)
313 other.oldpath = self.oldpath
310 other.oldpath = self.oldpath
314 other.mode = self.mode
311 other.mode = self.mode
315 other.op = self.op
312 other.op = self.op
316 other.binary = self.binary
313 other.binary = self.binary
317 return other
314 return other
318
315
319 def _ispatchinga(self, afile):
316 def _ispatchinga(self, afile):
320 if afile == '/dev/null':
317 if afile == '/dev/null':
321 return self.op == 'ADD'
318 return self.op == 'ADD'
322 return afile == 'a/' + (self.oldpath or self.path)
319 return afile == 'a/' + (self.oldpath or self.path)
323
320
324 def _ispatchingb(self, bfile):
321 def _ispatchingb(self, bfile):
325 if bfile == '/dev/null':
322 if bfile == '/dev/null':
326 return self.op == 'DELETE'
323 return self.op == 'DELETE'
327 return bfile == 'b/' + self.path
324 return bfile == 'b/' + self.path
328
325
329 def ispatching(self, afile, bfile):
326 def ispatching(self, afile, bfile):
330 return self._ispatchinga(afile) and self._ispatchingb(bfile)
327 return self._ispatchinga(afile) and self._ispatchingb(bfile)
331
328
332 def __repr__(self):
329 def __repr__(self):
333 return "<patchmeta %s %r>" % (self.op, self.path)
330 return "<patchmeta %s %r>" % (self.op, self.path)
334
331
335 def readgitpatch(lr):
332 def readgitpatch(lr):
336 """extract git-style metadata about patches from <patchname>"""
333 """extract git-style metadata about patches from <patchname>"""
337
334
338 # Filter patch for git information
335 # Filter patch for git information
339 gp = None
336 gp = None
340 gitpatches = []
337 gitpatches = []
341 for line in lr:
338 for line in lr:
342 line = line.rstrip(' \r\n')
339 line = line.rstrip(' \r\n')
343 if line.startswith('diff --git a/'):
340 if line.startswith('diff --git a/'):
344 m = gitre.match(line)
341 m = gitre.match(line)
345 if m:
342 if m:
346 if gp:
343 if gp:
347 gitpatches.append(gp)
344 gitpatches.append(gp)
348 dst = m.group(2)
345 dst = m.group(2)
349 gp = patchmeta(dst)
346 gp = patchmeta(dst)
350 elif gp:
347 elif gp:
351 if line.startswith('--- '):
348 if line.startswith('--- '):
352 gitpatches.append(gp)
349 gitpatches.append(gp)
353 gp = None
350 gp = None
354 continue
351 continue
355 if line.startswith('rename from '):
352 if line.startswith('rename from '):
356 gp.op = 'RENAME'
353 gp.op = 'RENAME'
357 gp.oldpath = line[12:]
354 gp.oldpath = line[12:]
358 elif line.startswith('rename to '):
355 elif line.startswith('rename to '):
359 gp.path = line[10:]
356 gp.path = line[10:]
360 elif line.startswith('copy from '):
357 elif line.startswith('copy from '):
361 gp.op = 'COPY'
358 gp.op = 'COPY'
362 gp.oldpath = line[10:]
359 gp.oldpath = line[10:]
363 elif line.startswith('copy to '):
360 elif line.startswith('copy to '):
364 gp.path = line[8:]
361 gp.path = line[8:]
365 elif line.startswith('deleted file'):
362 elif line.startswith('deleted file'):
366 gp.op = 'DELETE'
363 gp.op = 'DELETE'
367 elif line.startswith('new file mode '):
364 elif line.startswith('new file mode '):
368 gp.op = 'ADD'
365 gp.op = 'ADD'
369 gp.setmode(int(line[-6:], 8))
366 gp.setmode(int(line[-6:], 8))
370 elif line.startswith('new mode '):
367 elif line.startswith('new mode '):
371 gp.setmode(int(line[-6:], 8))
368 gp.setmode(int(line[-6:], 8))
372 elif line.startswith('GIT binary patch'):
369 elif line.startswith('GIT binary patch'):
373 gp.binary = True
370 gp.binary = True
374 if gp:
371 if gp:
375 gitpatches.append(gp)
372 gitpatches.append(gp)
376
373
377 return gitpatches
374 return gitpatches
378
375
379 class linereader(object):
376 class linereader(object):
380 # simple class to allow pushing lines back into the input stream
377 # simple class to allow pushing lines back into the input stream
381 def __init__(self, fp):
378 def __init__(self, fp):
382 self.fp = fp
379 self.fp = fp
383 self.buf = []
380 self.buf = []
384
381
385 def push(self, line):
382 def push(self, line):
386 if line is not None:
383 if line is not None:
387 self.buf.append(line)
384 self.buf.append(line)
388
385
389 def readline(self):
386 def readline(self):
390 if self.buf:
387 if self.buf:
391 l = self.buf[0]
388 l = self.buf[0]
392 del self.buf[0]
389 del self.buf[0]
393 return l
390 return l
394 return self.fp.readline()
391 return self.fp.readline()
395
392
396 def __iter__(self):
393 def __iter__(self):
397 while True:
394 while True:
398 l = self.readline()
395 l = self.readline()
399 if not l:
396 if not l:
400 break
397 break
401 yield l
398 yield l
402
399
403 class abstractbackend(object):
400 class abstractbackend(object):
404 def __init__(self, ui):
401 def __init__(self, ui):
405 self.ui = ui
402 self.ui = ui
406
403
407 def getfile(self, fname):
404 def getfile(self, fname):
408 """Return target file data and flags as a (data, (islink,
405 """Return target file data and flags as a (data, (islink,
409 isexec)) tuple. Data is None if file is missing/deleted.
406 isexec)) tuple. Data is None if file is missing/deleted.
410 """
407 """
411 raise NotImplementedError
408 raise NotImplementedError
412
409
413 def setfile(self, fname, data, mode, copysource):
410 def setfile(self, fname, data, mode, copysource):
414 """Write data to target file fname and set its mode. mode is a
411 """Write data to target file fname and set its mode. mode is a
415 (islink, isexec) tuple. If data is None, the file content should
412 (islink, isexec) tuple. If data is None, the file content should
416 be left unchanged. If the file is modified after being copied,
413 be left unchanged. If the file is modified after being copied,
417 copysource is set to the original file name.
414 copysource is set to the original file name.
418 """
415 """
419 raise NotImplementedError
416 raise NotImplementedError
420
417
421 def unlink(self, fname):
418 def unlink(self, fname):
422 """Unlink target file."""
419 """Unlink target file."""
423 raise NotImplementedError
420 raise NotImplementedError
424
421
425 def writerej(self, fname, failed, total, lines):
422 def writerej(self, fname, failed, total, lines):
426 """Write rejected lines for fname. total is the number of hunks
423 """Write rejected lines for fname. total is the number of hunks
427 which failed to apply and total the total number of hunks for this
424 which failed to apply and total the total number of hunks for this
428 files.
425 files.
429 """
426 """
430 pass
427 pass
431
428
432 def exists(self, fname):
429 def exists(self, fname):
433 raise NotImplementedError
430 raise NotImplementedError
434
431
435 class fsbackend(abstractbackend):
432 class fsbackend(abstractbackend):
436 def __init__(self, ui, basedir):
433 def __init__(self, ui, basedir):
437 super(fsbackend, self).__init__(ui)
434 super(fsbackend, self).__init__(ui)
438 self.opener = scmutil.opener(basedir)
435 self.opener = scmutil.opener(basedir)
439
436
440 def _join(self, f):
437 def _join(self, f):
441 return os.path.join(self.opener.base, f)
438 return os.path.join(self.opener.base, f)
442
439
443 def getfile(self, fname):
440 def getfile(self, fname):
444 if self.opener.islink(fname):
441 if self.opener.islink(fname):
445 return (self.opener.readlink(fname), (True, False))
442 return (self.opener.readlink(fname), (True, False))
446
443
447 isexec = False
444 isexec = False
448 try:
445 try:
449 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
446 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
450 except OSError as e:
447 except OSError as e:
451 if e.errno != errno.ENOENT:
448 if e.errno != errno.ENOENT:
452 raise
449 raise
453 try:
450 try:
454 return (self.opener.read(fname), (False, isexec))
451 return (self.opener.read(fname), (False, isexec))
455 except IOError as e:
452 except IOError as e:
456 if e.errno != errno.ENOENT:
453 if e.errno != errno.ENOENT:
457 raise
454 raise
458 return None, None
455 return None, None
459
456
460 def setfile(self, fname, data, mode, copysource):
457 def setfile(self, fname, data, mode, copysource):
461 islink, isexec = mode
458 islink, isexec = mode
462 if data is None:
459 if data is None:
463 self.opener.setflags(fname, islink, isexec)
460 self.opener.setflags(fname, islink, isexec)
464 return
461 return
465 if islink:
462 if islink:
466 self.opener.symlink(data, fname)
463 self.opener.symlink(data, fname)
467 else:
464 else:
468 self.opener.write(fname, data)
465 self.opener.write(fname, data)
469 if isexec:
466 if isexec:
470 self.opener.setflags(fname, False, True)
467 self.opener.setflags(fname, False, True)
471
468
472 def unlink(self, fname):
469 def unlink(self, fname):
473 self.opener.unlinkpath(fname, ignoremissing=True)
470 self.opener.unlinkpath(fname, ignoremissing=True)
474
471
475 def writerej(self, fname, failed, total, lines):
472 def writerej(self, fname, failed, total, lines):
476 fname = fname + ".rej"
473 fname = fname + ".rej"
477 self.ui.warn(
474 self.ui.warn(
478 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
475 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
479 (failed, total, fname))
476 (failed, total, fname))
480 fp = self.opener(fname, 'w')
477 fp = self.opener(fname, 'w')
481 fp.writelines(lines)
478 fp.writelines(lines)
482 fp.close()
479 fp.close()
483
480
484 def exists(self, fname):
481 def exists(self, fname):
485 return self.opener.lexists(fname)
482 return self.opener.lexists(fname)
486
483
487 class workingbackend(fsbackend):
484 class workingbackend(fsbackend):
488 def __init__(self, ui, repo, similarity):
485 def __init__(self, ui, repo, similarity):
489 super(workingbackend, self).__init__(ui, repo.root)
486 super(workingbackend, self).__init__(ui, repo.root)
490 self.repo = repo
487 self.repo = repo
491 self.similarity = similarity
488 self.similarity = similarity
492 self.removed = set()
489 self.removed = set()
493 self.changed = set()
490 self.changed = set()
494 self.copied = []
491 self.copied = []
495
492
496 def _checkknown(self, fname):
493 def _checkknown(self, fname):
497 if self.repo.dirstate[fname] == '?' and self.exists(fname):
494 if self.repo.dirstate[fname] == '?' and self.exists(fname):
498 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
495 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
499
496
500 def setfile(self, fname, data, mode, copysource):
497 def setfile(self, fname, data, mode, copysource):
501 self._checkknown(fname)
498 self._checkknown(fname)
502 super(workingbackend, self).setfile(fname, data, mode, copysource)
499 super(workingbackend, self).setfile(fname, data, mode, copysource)
503 if copysource is not None:
500 if copysource is not None:
504 self.copied.append((copysource, fname))
501 self.copied.append((copysource, fname))
505 self.changed.add(fname)
502 self.changed.add(fname)
506
503
507 def unlink(self, fname):
504 def unlink(self, fname):
508 self._checkknown(fname)
505 self._checkknown(fname)
509 super(workingbackend, self).unlink(fname)
506 super(workingbackend, self).unlink(fname)
510 self.removed.add(fname)
507 self.removed.add(fname)
511 self.changed.add(fname)
508 self.changed.add(fname)
512
509
513 def close(self):
510 def close(self):
514 wctx = self.repo[None]
511 wctx = self.repo[None]
515 changed = set(self.changed)
512 changed = set(self.changed)
516 for src, dst in self.copied:
513 for src, dst in self.copied:
517 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
514 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
518 if self.removed:
515 if self.removed:
519 wctx.forget(sorted(self.removed))
516 wctx.forget(sorted(self.removed))
520 for f in self.removed:
517 for f in self.removed:
521 if f not in self.repo.dirstate:
518 if f not in self.repo.dirstate:
522 # File was deleted and no longer belongs to the
519 # File was deleted and no longer belongs to the
523 # dirstate, it was probably marked added then
520 # dirstate, it was probably marked added then
524 # deleted, and should not be considered by
521 # deleted, and should not be considered by
525 # marktouched().
522 # marktouched().
526 changed.discard(f)
523 changed.discard(f)
527 if changed:
524 if changed:
528 scmutil.marktouched(self.repo, changed, self.similarity)
525 scmutil.marktouched(self.repo, changed, self.similarity)
529 return sorted(self.changed)
526 return sorted(self.changed)
530
527
531 class filestore(object):
528 class filestore(object):
532 def __init__(self, maxsize=None):
529 def __init__(self, maxsize=None):
533 self.opener = None
530 self.opener = None
534 self.files = {}
531 self.files = {}
535 self.created = 0
532 self.created = 0
536 self.maxsize = maxsize
533 self.maxsize = maxsize
537 if self.maxsize is None:
534 if self.maxsize is None:
538 self.maxsize = 4*(2**20)
535 self.maxsize = 4*(2**20)
539 self.size = 0
536 self.size = 0
540 self.data = {}
537 self.data = {}
541
538
542 def setfile(self, fname, data, mode, copied=None):
539 def setfile(self, fname, data, mode, copied=None):
543 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
540 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
544 self.data[fname] = (data, mode, copied)
541 self.data[fname] = (data, mode, copied)
545 self.size += len(data)
542 self.size += len(data)
546 else:
543 else:
547 if self.opener is None:
544 if self.opener is None:
548 root = tempfile.mkdtemp(prefix='hg-patch-')
545 root = tempfile.mkdtemp(prefix='hg-patch-')
549 self.opener = scmutil.opener(root)
546 self.opener = scmutil.opener(root)
550 # Avoid filename issues with these simple names
547 # Avoid filename issues with these simple names
551 fn = str(self.created)
548 fn = str(self.created)
552 self.opener.write(fn, data)
549 self.opener.write(fn, data)
553 self.created += 1
550 self.created += 1
554 self.files[fname] = (fn, mode, copied)
551 self.files[fname] = (fn, mode, copied)
555
552
556 def getfile(self, fname):
553 def getfile(self, fname):
557 if fname in self.data:
554 if fname in self.data:
558 return self.data[fname]
555 return self.data[fname]
559 if not self.opener or fname not in self.files:
556 if not self.opener or fname not in self.files:
560 return None, None, None
557 return None, None, None
561 fn, mode, copied = self.files[fname]
558 fn, mode, copied = self.files[fname]
562 return self.opener.read(fn), mode, copied
559 return self.opener.read(fn), mode, copied
563
560
564 def close(self):
561 def close(self):
565 if self.opener:
562 if self.opener:
566 shutil.rmtree(self.opener.base)
563 shutil.rmtree(self.opener.base)
567
564
568 class repobackend(abstractbackend):
565 class repobackend(abstractbackend):
569 def __init__(self, ui, repo, ctx, store):
566 def __init__(self, ui, repo, ctx, store):
570 super(repobackend, self).__init__(ui)
567 super(repobackend, self).__init__(ui)
571 self.repo = repo
568 self.repo = repo
572 self.ctx = ctx
569 self.ctx = ctx
573 self.store = store
570 self.store = store
574 self.changed = set()
571 self.changed = set()
575 self.removed = set()
572 self.removed = set()
576 self.copied = {}
573 self.copied = {}
577
574
578 def _checkknown(self, fname):
575 def _checkknown(self, fname):
579 if fname not in self.ctx:
576 if fname not in self.ctx:
580 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
577 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
581
578
582 def getfile(self, fname):
579 def getfile(self, fname):
583 try:
580 try:
584 fctx = self.ctx[fname]
581 fctx = self.ctx[fname]
585 except error.LookupError:
582 except error.LookupError:
586 return None, None
583 return None, None
587 flags = fctx.flags()
584 flags = fctx.flags()
588 return fctx.data(), ('l' in flags, 'x' in flags)
585 return fctx.data(), ('l' in flags, 'x' in flags)
589
586
590 def setfile(self, fname, data, mode, copysource):
587 def setfile(self, fname, data, mode, copysource):
591 if copysource:
588 if copysource:
592 self._checkknown(copysource)
589 self._checkknown(copysource)
593 if data is None:
590 if data is None:
594 data = self.ctx[fname].data()
591 data = self.ctx[fname].data()
595 self.store.setfile(fname, data, mode, copysource)
592 self.store.setfile(fname, data, mode, copysource)
596 self.changed.add(fname)
593 self.changed.add(fname)
597 if copysource:
594 if copysource:
598 self.copied[fname] = copysource
595 self.copied[fname] = copysource
599
596
600 def unlink(self, fname):
597 def unlink(self, fname):
601 self._checkknown(fname)
598 self._checkknown(fname)
602 self.removed.add(fname)
599 self.removed.add(fname)
603
600
604 def exists(self, fname):
601 def exists(self, fname):
605 return fname in self.ctx
602 return fname in self.ctx
606
603
607 def close(self):
604 def close(self):
608 return self.changed | self.removed
605 return self.changed | self.removed
609
606
610 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
607 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
611 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
608 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
612 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
609 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
613 eolmodes = ['strict', 'crlf', 'lf', 'auto']
610 eolmodes = ['strict', 'crlf', 'lf', 'auto']
614
611
615 class patchfile(object):
612 class patchfile(object):
616 def __init__(self, ui, gp, backend, store, eolmode='strict'):
613 def __init__(self, ui, gp, backend, store, eolmode='strict'):
617 self.fname = gp.path
614 self.fname = gp.path
618 self.eolmode = eolmode
615 self.eolmode = eolmode
619 self.eol = None
616 self.eol = None
620 self.backend = backend
617 self.backend = backend
621 self.ui = ui
618 self.ui = ui
622 self.lines = []
619 self.lines = []
623 self.exists = False
620 self.exists = False
624 self.missing = True
621 self.missing = True
625 self.mode = gp.mode
622 self.mode = gp.mode
626 self.copysource = gp.oldpath
623 self.copysource = gp.oldpath
627 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
624 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
628 self.remove = gp.op == 'DELETE'
625 self.remove = gp.op == 'DELETE'
629 if self.copysource is None:
626 if self.copysource is None:
630 data, mode = backend.getfile(self.fname)
627 data, mode = backend.getfile(self.fname)
631 else:
628 else:
632 data, mode = store.getfile(self.copysource)[:2]
629 data, mode = store.getfile(self.copysource)[:2]
633 if data is not None:
630 if data is not None:
634 self.exists = self.copysource is None or backend.exists(self.fname)
631 self.exists = self.copysource is None or backend.exists(self.fname)
635 self.missing = False
632 self.missing = False
636 if data:
633 if data:
637 self.lines = mdiff.splitnewlines(data)
634 self.lines = mdiff.splitnewlines(data)
638 if self.mode is None:
635 if self.mode is None:
639 self.mode = mode
636 self.mode = mode
640 if self.lines:
637 if self.lines:
641 # Normalize line endings
638 # Normalize line endings
642 if self.lines[0].endswith('\r\n'):
639 if self.lines[0].endswith('\r\n'):
643 self.eol = '\r\n'
640 self.eol = '\r\n'
644 elif self.lines[0].endswith('\n'):
641 elif self.lines[0].endswith('\n'):
645 self.eol = '\n'
642 self.eol = '\n'
646 if eolmode != 'strict':
643 if eolmode != 'strict':
647 nlines = []
644 nlines = []
648 for l in self.lines:
645 for l in self.lines:
649 if l.endswith('\r\n'):
646 if l.endswith('\r\n'):
650 l = l[:-2] + '\n'
647 l = l[:-2] + '\n'
651 nlines.append(l)
648 nlines.append(l)
652 self.lines = nlines
649 self.lines = nlines
653 else:
650 else:
654 if self.create:
651 if self.create:
655 self.missing = False
652 self.missing = False
656 if self.mode is None:
653 if self.mode is None:
657 self.mode = (False, False)
654 self.mode = (False, False)
658 if self.missing:
655 if self.missing:
659 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
656 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
660
657
661 self.hash = {}
658 self.hash = {}
662 self.dirty = 0
659 self.dirty = 0
663 self.offset = 0
660 self.offset = 0
664 self.skew = 0
661 self.skew = 0
665 self.rej = []
662 self.rej = []
666 self.fileprinted = False
663 self.fileprinted = False
667 self.printfile(False)
664 self.printfile(False)
668 self.hunks = 0
665 self.hunks = 0
669
666
670 def writelines(self, fname, lines, mode):
667 def writelines(self, fname, lines, mode):
671 if self.eolmode == 'auto':
668 if self.eolmode == 'auto':
672 eol = self.eol
669 eol = self.eol
673 elif self.eolmode == 'crlf':
670 elif self.eolmode == 'crlf':
674 eol = '\r\n'
671 eol = '\r\n'
675 else:
672 else:
676 eol = '\n'
673 eol = '\n'
677
674
678 if self.eolmode != 'strict' and eol and eol != '\n':
675 if self.eolmode != 'strict' and eol and eol != '\n':
679 rawlines = []
676 rawlines = []
680 for l in lines:
677 for l in lines:
681 if l and l[-1] == '\n':
678 if l and l[-1] == '\n':
682 l = l[:-1] + eol
679 l = l[:-1] + eol
683 rawlines.append(l)
680 rawlines.append(l)
684 lines = rawlines
681 lines = rawlines
685
682
686 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
683 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
687
684
688 def printfile(self, warn):
685 def printfile(self, warn):
689 if self.fileprinted:
686 if self.fileprinted:
690 return
687 return
691 if warn or self.ui.verbose:
688 if warn or self.ui.verbose:
692 self.fileprinted = True
689 self.fileprinted = True
693 s = _("patching file %s\n") % self.fname
690 s = _("patching file %s\n") % self.fname
694 if warn:
691 if warn:
695 self.ui.warn(s)
692 self.ui.warn(s)
696 else:
693 else:
697 self.ui.note(s)
694 self.ui.note(s)
698
695
699
696
700 def findlines(self, l, linenum):
697 def findlines(self, l, linenum):
701 # looks through the hash and finds candidate lines. The
698 # looks through the hash and finds candidate lines. The
702 # result is a list of line numbers sorted based on distance
699 # result is a list of line numbers sorted based on distance
703 # from linenum
700 # from linenum
704
701
705 cand = self.hash.get(l, [])
702 cand = self.hash.get(l, [])
706 if len(cand) > 1:
703 if len(cand) > 1:
707 # resort our list of potentials forward then back.
704 # resort our list of potentials forward then back.
708 cand.sort(key=lambda x: abs(x - linenum))
705 cand.sort(key=lambda x: abs(x - linenum))
709 return cand
706 return cand
710
707
711 def write_rej(self):
708 def write_rej(self):
712 # our rejects are a little different from patch(1). This always
709 # our rejects are a little different from patch(1). This always
713 # creates rejects in the same form as the original patch. A file
710 # creates rejects in the same form as the original patch. A file
714 # header is inserted so that you can run the reject through patch again
711 # header is inserted so that you can run the reject through patch again
715 # without having to type the filename.
712 # without having to type the filename.
716 if not self.rej:
713 if not self.rej:
717 return
714 return
718 base = os.path.basename(self.fname)
715 base = os.path.basename(self.fname)
719 lines = ["--- %s\n+++ %s\n" % (base, base)]
716 lines = ["--- %s\n+++ %s\n" % (base, base)]
720 for x in self.rej:
717 for x in self.rej:
721 for l in x.hunk:
718 for l in x.hunk:
722 lines.append(l)
719 lines.append(l)
723 if l[-1] != '\n':
720 if l[-1] != '\n':
724 lines.append("\n\ No newline at end of file\n")
721 lines.append("\n\ No newline at end of file\n")
725 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
722 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
726
723
727 def apply(self, h):
724 def apply(self, h):
728 if not h.complete():
725 if not h.complete():
729 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
726 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
730 (h.number, h.desc, len(h.a), h.lena, len(h.b),
727 (h.number, h.desc, len(h.a), h.lena, len(h.b),
731 h.lenb))
728 h.lenb))
732
729
733 self.hunks += 1
730 self.hunks += 1
734
731
735 if self.missing:
732 if self.missing:
736 self.rej.append(h)
733 self.rej.append(h)
737 return -1
734 return -1
738
735
739 if self.exists and self.create:
736 if self.exists and self.create:
740 if self.copysource:
737 if self.copysource:
741 self.ui.warn(_("cannot create %s: destination already "
738 self.ui.warn(_("cannot create %s: destination already "
742 "exists\n") % self.fname)
739 "exists\n") % self.fname)
743 else:
740 else:
744 self.ui.warn(_("file %s already exists\n") % self.fname)
741 self.ui.warn(_("file %s already exists\n") % self.fname)
745 self.rej.append(h)
742 self.rej.append(h)
746 return -1
743 return -1
747
744
748 if isinstance(h, binhunk):
745 if isinstance(h, binhunk):
749 if self.remove:
746 if self.remove:
750 self.backend.unlink(self.fname)
747 self.backend.unlink(self.fname)
751 else:
748 else:
752 l = h.new(self.lines)
749 l = h.new(self.lines)
753 self.lines[:] = l
750 self.lines[:] = l
754 self.offset += len(l)
751 self.offset += len(l)
755 self.dirty = True
752 self.dirty = True
756 return 0
753 return 0
757
754
758 horig = h
755 horig = h
759 if (self.eolmode in ('crlf', 'lf')
756 if (self.eolmode in ('crlf', 'lf')
760 or self.eolmode == 'auto' and self.eol):
757 or self.eolmode == 'auto' and self.eol):
761 # If new eols are going to be normalized, then normalize
758 # If new eols are going to be normalized, then normalize
762 # hunk data before patching. Otherwise, preserve input
759 # hunk data before patching. Otherwise, preserve input
763 # line-endings.
760 # line-endings.
764 h = h.getnormalized()
761 h = h.getnormalized()
765
762
766 # fast case first, no offsets, no fuzz
763 # fast case first, no offsets, no fuzz
767 old, oldstart, new, newstart = h.fuzzit(0, False)
764 old, oldstart, new, newstart = h.fuzzit(0, False)
768 oldstart += self.offset
765 oldstart += self.offset
769 orig_start = oldstart
766 orig_start = oldstart
770 # if there's skew we want to emit the "(offset %d lines)" even
767 # if there's skew we want to emit the "(offset %d lines)" even
771 # when the hunk cleanly applies at start + skew, so skip the
768 # when the hunk cleanly applies at start + skew, so skip the
772 # fast case code
769 # fast case code
773 if (self.skew == 0 and
770 if (self.skew == 0 and
774 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
771 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
775 if self.remove:
772 if self.remove:
776 self.backend.unlink(self.fname)
773 self.backend.unlink(self.fname)
777 else:
774 else:
778 self.lines[oldstart:oldstart + len(old)] = new
775 self.lines[oldstart:oldstart + len(old)] = new
779 self.offset += len(new) - len(old)
776 self.offset += len(new) - len(old)
780 self.dirty = True
777 self.dirty = True
781 return 0
778 return 0
782
779
783 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
780 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
784 self.hash = {}
781 self.hash = {}
785 for x, s in enumerate(self.lines):
782 for x, s in enumerate(self.lines):
786 self.hash.setdefault(s, []).append(x)
783 self.hash.setdefault(s, []).append(x)
787
784
788 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
785 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
789 for toponly in [True, False]:
786 for toponly in [True, False]:
790 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
787 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
791 oldstart = oldstart + self.offset + self.skew
788 oldstart = oldstart + self.offset + self.skew
792 oldstart = min(oldstart, len(self.lines))
789 oldstart = min(oldstart, len(self.lines))
793 if old:
790 if old:
794 cand = self.findlines(old[0][1:], oldstart)
791 cand = self.findlines(old[0][1:], oldstart)
795 else:
792 else:
796 # Only adding lines with no or fuzzed context, just
793 # Only adding lines with no or fuzzed context, just
797 # take the skew in account
794 # take the skew in account
798 cand = [oldstart]
795 cand = [oldstart]
799
796
800 for l in cand:
797 for l in cand:
801 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
798 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
802 self.lines[l : l + len(old)] = new
799 self.lines[l : l + len(old)] = new
803 self.offset += len(new) - len(old)
800 self.offset += len(new) - len(old)
804 self.skew = l - orig_start
801 self.skew = l - orig_start
805 self.dirty = True
802 self.dirty = True
806 offset = l - orig_start - fuzzlen
803 offset = l - orig_start - fuzzlen
807 if fuzzlen:
804 if fuzzlen:
808 msg = _("Hunk #%d succeeded at %d "
805 msg = _("Hunk #%d succeeded at %d "
809 "with fuzz %d "
806 "with fuzz %d "
810 "(offset %d lines).\n")
807 "(offset %d lines).\n")
811 self.printfile(True)
808 self.printfile(True)
812 self.ui.warn(msg %
809 self.ui.warn(msg %
813 (h.number, l + 1, fuzzlen, offset))
810 (h.number, l + 1, fuzzlen, offset))
814 else:
811 else:
815 msg = _("Hunk #%d succeeded at %d "
812 msg = _("Hunk #%d succeeded at %d "
816 "(offset %d lines).\n")
813 "(offset %d lines).\n")
817 self.ui.note(msg % (h.number, l + 1, offset))
814 self.ui.note(msg % (h.number, l + 1, offset))
818 return fuzzlen
815 return fuzzlen
819 self.printfile(True)
816 self.printfile(True)
820 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
817 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
821 self.rej.append(horig)
818 self.rej.append(horig)
822 return -1
819 return -1
823
820
824 def close(self):
821 def close(self):
825 if self.dirty:
822 if self.dirty:
826 self.writelines(self.fname, self.lines, self.mode)
823 self.writelines(self.fname, self.lines, self.mode)
827 self.write_rej()
824 self.write_rej()
828 return len(self.rej)
825 return len(self.rej)
829
826
830 class header(object):
827 class header(object):
831 """patch header
828 """patch header
832 """
829 """
833 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
830 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
834 diff_re = re.compile('diff -r .* (.*)$')
831 diff_re = re.compile('diff -r .* (.*)$')
835 allhunks_re = re.compile('(?:index|deleted file) ')
832 allhunks_re = re.compile('(?:index|deleted file) ')
836 pretty_re = re.compile('(?:new file|deleted file) ')
833 pretty_re = re.compile('(?:new file|deleted file) ')
837 special_re = re.compile('(?:index|deleted|copy|rename) ')
834 special_re = re.compile('(?:index|deleted|copy|rename) ')
838 newfile_re = re.compile('(?:new file)')
835 newfile_re = re.compile('(?:new file)')
839
836
840 def __init__(self, header):
837 def __init__(self, header):
841 self.header = header
838 self.header = header
842 self.hunks = []
839 self.hunks = []
843
840
844 def binary(self):
841 def binary(self):
845 return any(h.startswith('index ') for h in self.header)
842 return any(h.startswith('index ') for h in self.header)
846
843
847 def pretty(self, fp):
844 def pretty(self, fp):
848 for h in self.header:
845 for h in self.header:
849 if h.startswith('index '):
846 if h.startswith('index '):
850 fp.write(_('this modifies a binary file (all or nothing)\n'))
847 fp.write(_('this modifies a binary file (all or nothing)\n'))
851 break
848 break
852 if self.pretty_re.match(h):
849 if self.pretty_re.match(h):
853 fp.write(h)
850 fp.write(h)
854 if self.binary():
851 if self.binary():
855 fp.write(_('this is a binary file\n'))
852 fp.write(_('this is a binary file\n'))
856 break
853 break
857 if h.startswith('---'):
854 if h.startswith('---'):
858 fp.write(_('%d hunks, %d lines changed\n') %
855 fp.write(_('%d hunks, %d lines changed\n') %
859 (len(self.hunks),
856 (len(self.hunks),
860 sum([max(h.added, h.removed) for h in self.hunks])))
857 sum([max(h.added, h.removed) for h in self.hunks])))
861 break
858 break
862 fp.write(h)
859 fp.write(h)
863
860
864 def write(self, fp):
861 def write(self, fp):
865 fp.write(''.join(self.header))
862 fp.write(''.join(self.header))
866
863
867 def allhunks(self):
864 def allhunks(self):
868 return any(self.allhunks_re.match(h) for h in self.header)
865 return any(self.allhunks_re.match(h) for h in self.header)
869
866
870 def files(self):
867 def files(self):
871 match = self.diffgit_re.match(self.header[0])
868 match = self.diffgit_re.match(self.header[0])
872 if match:
869 if match:
873 fromfile, tofile = match.groups()
870 fromfile, tofile = match.groups()
874 if fromfile == tofile:
871 if fromfile == tofile:
875 return [fromfile]
872 return [fromfile]
876 return [fromfile, tofile]
873 return [fromfile, tofile]
877 else:
874 else:
878 return self.diff_re.match(self.header[0]).groups()
875 return self.diff_re.match(self.header[0]).groups()
879
876
880 def filename(self):
877 def filename(self):
881 return self.files()[-1]
878 return self.files()[-1]
882
879
883 def __repr__(self):
880 def __repr__(self):
884 return '<header %s>' % (' '.join(map(repr, self.files())))
881 return '<header %s>' % (' '.join(map(repr, self.files())))
885
882
886 def isnewfile(self):
883 def isnewfile(self):
887 return any(self.newfile_re.match(h) for h in self.header)
884 return any(self.newfile_re.match(h) for h in self.header)
888
885
889 def special(self):
886 def special(self):
890 # Special files are shown only at the header level and not at the hunk
887 # Special files are shown only at the header level and not at the hunk
891 # level for example a file that has been deleted is a special file.
888 # level for example a file that has been deleted is a special file.
892 # The user cannot change the content of the operation, in the case of
889 # The user cannot change the content of the operation, in the case of
893 # the deleted file he has to take the deletion or not take it, he
890 # the deleted file he has to take the deletion or not take it, he
894 # cannot take some of it.
891 # cannot take some of it.
895 # Newly added files are special if they are empty, they are not special
892 # Newly added files are special if they are empty, they are not special
896 # if they have some content as we want to be able to change it
893 # if they have some content as we want to be able to change it
897 nocontent = len(self.header) == 2
894 nocontent = len(self.header) == 2
898 emptynewfile = self.isnewfile() and nocontent
895 emptynewfile = self.isnewfile() and nocontent
899 return emptynewfile or \
896 return emptynewfile or \
900 any(self.special_re.match(h) for h in self.header)
897 any(self.special_re.match(h) for h in self.header)
901
898
902 class recordhunk(object):
899 class recordhunk(object):
903 """patch hunk
900 """patch hunk
904
901
905 XXX shouldn't we merge this with the other hunk class?
902 XXX shouldn't we merge this with the other hunk class?
906 """
903 """
907 maxcontext = 3
904 maxcontext = 3
908
905
909 def __init__(self, header, fromline, toline, proc, before, hunk, after):
906 def __init__(self, header, fromline, toline, proc, before, hunk, after):
910 def trimcontext(number, lines):
907 def trimcontext(number, lines):
911 delta = len(lines) - self.maxcontext
908 delta = len(lines) - self.maxcontext
912 if False and delta > 0:
909 if False and delta > 0:
913 return number + delta, lines[:self.maxcontext]
910 return number + delta, lines[:self.maxcontext]
914 return number, lines
911 return number, lines
915
912
916 self.header = header
913 self.header = header
917 self.fromline, self.before = trimcontext(fromline, before)
914 self.fromline, self.before = trimcontext(fromline, before)
918 self.toline, self.after = trimcontext(toline, after)
915 self.toline, self.after = trimcontext(toline, after)
919 self.proc = proc
916 self.proc = proc
920 self.hunk = hunk
917 self.hunk = hunk
921 self.added, self.removed = self.countchanges(self.hunk)
918 self.added, self.removed = self.countchanges(self.hunk)
922
919
923 def __eq__(self, v):
920 def __eq__(self, v):
924 if not isinstance(v, recordhunk):
921 if not isinstance(v, recordhunk):
925 return False
922 return False
926
923
927 return ((v.hunk == self.hunk) and
924 return ((v.hunk == self.hunk) and
928 (v.proc == self.proc) and
925 (v.proc == self.proc) and
929 (self.fromline == v.fromline) and
926 (self.fromline == v.fromline) and
930 (self.header.files() == v.header.files()))
927 (self.header.files() == v.header.files()))
931
928
932 def __hash__(self):
929 def __hash__(self):
933 return hash((tuple(self.hunk),
930 return hash((tuple(self.hunk),
934 tuple(self.header.files()),
931 tuple(self.header.files()),
935 self.fromline,
932 self.fromline,
936 self.proc))
933 self.proc))
937
934
938 def countchanges(self, hunk):
935 def countchanges(self, hunk):
939 """hunk -> (n+,n-)"""
936 """hunk -> (n+,n-)"""
940 add = len([h for h in hunk if h[0] == '+'])
937 add = len([h for h in hunk if h[0] == '+'])
941 rem = len([h for h in hunk if h[0] == '-'])
938 rem = len([h for h in hunk if h[0] == '-'])
942 return add, rem
939 return add, rem
943
940
944 def write(self, fp):
941 def write(self, fp):
945 delta = len(self.before) + len(self.after)
942 delta = len(self.before) + len(self.after)
946 if self.after and self.after[-1] == '\\ No newline at end of file\n':
943 if self.after and self.after[-1] == '\\ No newline at end of file\n':
947 delta -= 1
944 delta -= 1
948 fromlen = delta + self.removed
945 fromlen = delta + self.removed
949 tolen = delta + self.added
946 tolen = delta + self.added
950 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
947 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
951 (self.fromline, fromlen, self.toline, tolen,
948 (self.fromline, fromlen, self.toline, tolen,
952 self.proc and (' ' + self.proc)))
949 self.proc and (' ' + self.proc)))
953 fp.write(''.join(self.before + self.hunk + self.after))
950 fp.write(''.join(self.before + self.hunk + self.after))
954
951
955 pretty = write
952 pretty = write
956
953
957 def filename(self):
954 def filename(self):
958 return self.header.filename()
955 return self.header.filename()
959
956
960 def __repr__(self):
957 def __repr__(self):
961 return '<hunk %r@%d>' % (self.filename(), self.fromline)
958 return '<hunk %r@%d>' % (self.filename(), self.fromline)
962
959
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks"""
    if operation is None:
        operation = _('record')

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = _('[Ynesfdaq?]'
                      '$$ &Yes, record this change'
                      '$$ &No, skip this change'
                      '$$ &Edit this change manually'
                      '$$ &Skip remaining changes to this file'
                      '$$ Record remaining changes to this &file'
                      '$$ &Done, skip remaining changes and files'
                      '$$ Record &all changes to all remaining files'
                      '$$ &Quit, recording no changes'
                      '$$ &? (display help)')
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, t.lower()))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                        suffix=".diff", text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, "w")
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()})
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = cStringIO.StringIO()
                    for line in patchfp:
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise util.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

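    # Walk the file headers, prompting once per file and then once per hunk.
    # 'applied' maps filename -> [header, hunk, ...]; fixoffset keeps the
    # target line numbers of later hunks consistent when an earlier hunk in
    # the same file is skipped or replaced by an edited version.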
    seen = set()
    applied = {}  # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
        msg = (_('examine changes to %s?') %
               _(' and ').join("'%s'" % f for f in h.files()))
        r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
        if not r:
            continue
        applied[h.filename()] = [h]
        if h.allhunks():
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = _("record this change to '%s'?") % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = _("record change %d/%d to '%s'?") % (idx, total,
                                                           chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                                                      skipall, msg, chunk)
            if r:
                if fixoffset:
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    return sum([h for h in applied.itervalues()
                if h[0].special() or len(h) > 1], [])
class hunk(object):
    def __init__(self, desc, num, lr, context):
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

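    # read_unified_hunk parses a '@@ -start,len +start,len @@' line plus the
    # hunk body that follows; for illustration, '@@ -10,6 +10,7 @@' yields
    # starta=10, lena=6, startb=10, lenb=7, and a missing length defaults to 1.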
    def read_unified_hunk(self, lr):
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith(' '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith(' '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

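    # fuzzit trims up to 'fuzz' context lines from the top (and, unless
    # toponly, the bottom) of the hunk so it can still match when the
    # surrounding context has drifted, in the spirit of 'patch --fuzz', and
    # returns the trimmed lines together with the adjusted start offsets.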
    def fuzzit(self, fuzz, toponly):
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart

class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        self.text = None
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        return self.text is not None

    def new(self, lines):
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

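    # _read consumes a 'GIT binary patch' body: a 'literal <size>' or
    # 'delta <size>' line followed by base85 lines whose leading character
    # encodes the decoded length of that line (A-Z for 1-26 bytes, a-z for
    # 27-52); the concatenated payload is zlib-compressed.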
    def _read(self, lr):
        def getline(lr, hunk):
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text

def parsefilename(str):
    # --- filename \t|space stuff
    s = str[4:].rstrip('\r\n')
    i = s.find('\t')
    if i < 0:
        i = s.find(' ')
        if i < 0:
            return s
    return s[:i]

def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = """diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch(rawpatch)
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> fp = cStringIO.StringIO()
    >>> for c in reversedhunks:
    ...     c.write(fp)
    >>> fp.seek(0)
    >>> reversedpatch = fp.read()
    >>> print reversedpatch
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -1,6 +2,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -5,3 +6,2 @@
     5
     d
    -lastline

    '''

    import crecord as crecordmod
    newhunks = []
    for c in hunks:
        if isinstance(c, crecordmod.uihunk):
            # curses hunks encapsulate the record hunk in _hunk
            c = c._hunk
        if isinstance(c, recordhunk):
            for j, line in enumerate(c.hunk):
                if line.startswith("-"):
                    c.hunk[j] = "+" + c.hunk[j][1:]
                elif line.startswith("+"):
                    c.hunk[j] = "-" + c.hunk[j][1:]
            c.added, c.removed = c.removed, c.added
        newhunks.append(c)
    return newhunks

def parsepatch(originalchunks):
    """patch -> [] of headers -> [] of hunks """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0
            self.toline = 0
            self.proc = ''
            self.header = None
            self.context = []
            self.before = []
            self.hunk = []
            self.headers = []

        def addrange(self, limits):
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                               self.proc, self.before, self.hunk, context)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
                self.proc = ''
            self.context = context

        def addhunk(self, hunk):
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            self.addcontext([])
            return self.headers

        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
            }

    p = parser()
    fp = cStringIO.StringIO()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()

def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform('a/b/c', 0, '')
    ('', 'a/b/c')
    >>> pathtransform(' a/b/c ', 0, '')
    ('', ' a/b/c')
    >>> pathtransform(' a/b/c ', 2, '')
    ('a/b/', 'c')
    >>> pathtransform('a/b/c', 0, 'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(' a//b/c ', 2, 'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform('a/b/c', 3, '')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    pathlen = len(path)
    i = 0
    if strip == 0:
        return '', prefix + path.rstrip()
    count = strip
    while count > 0:
        i = path.find('/', i)
        if i == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (count, strip, path))
        i += 1
        # consume '//' in the path
        while i < pathlen - 1 and path[i] == '/':
            i += 1
        count -= 1
    return path[:i].lstrip(), prefix + path[i:].rstrip()

def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp

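# scanpatch feeds parsepatch above: a typical git-style diff produces one
# 'file' event for the header block, a 'range' event for each '@@' line, and
# alternating 'hunk'/'context' events for the '+'/'-' and ' ' lines between
# them.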
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        while True:
            line = lr.readline()
            if not line:
                break
            if p(line):
                lines.append(line)
            else:
                lr.push(line)
                break
        return lines

    while True:
        line = lr.readline()
        if not line:
            break
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0] == ' ':
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line

def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    fp.seek(pos)
    return gitpatches

def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())

def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """
    def deltahead(binchunk):
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
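    # The delta starts with two variable-length integers giving the source
    # and target sizes; deltahead returns the width of one such integer, so
    # the two slices below simply skip that header before decoding opcodes.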
    out = ""
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    i = 0
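    # Each opcode either copies from the source or inserts literal bytes:
    # when bit 0x80 is set, bits 0x01-0x08 select which offset bytes follow
    # and bits 0x10-0x40 which size bytes (a size of 0 means 0x10000), and
    # that span of 'data' is copied; opcodes 0x01-0x7f append that many
    # literal bytes from the delta itself; opcode 0 is invalid.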
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i]) << 16
                i += 1
            if size == 0:
                size = 0x10000
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return out

def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)

def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):

    if prefix:
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    # FIXME: failing getfile has never been handled here
                    assert data is not None
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
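        # 'git' events arrive before any file or hunk events for the affected
        # paths: stash the copy/rename sources in the store now so a later
        # COPY or RENAME can still read the original contents even after the
        # source file has been modified or unlinked.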
        elif state == 'git':
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err

def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz

def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0

def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    backend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)

def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    backend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)

def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = set()
    if patcher:
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)

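# Illustrative sketch (not part of the original module): a minimal caller of
# patch() above. 'ui' and 'repo' stand for the usual Mercurial ui and
# localrepository objects, and 'fix.patch' is a hypothetical patch file name;
# eolmode=None defers to the user's [patch] eol setting.
def _example_applypatch(ui, repo):
    fuzz = patch(ui, repo, 'fix.patch', strip=1, eolmode=None)
    if fuzz:
        ui.warn('patch applied with fuzz\n')
    return fuzz
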
def changedfiles(ui, repo, patchpath, strip=1):
    backend = fsbackend(ui, repo.root)
    fp = open(patchpath, 'rb')
    try:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pathtransform(gp.path, strip - 1, '')[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       '')
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise util.Abort(_('unsupported parser state: %s') % state)
        return changed
    finally:
        fp.close()

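# Illustrative sketch (not part of the original module): previewing which
# files a patch touches without applying it. 'series.patch' is a hypothetical
# file name; renames contribute both their old and new paths.
def _example_listchanged(ui, repo):
    for f in sorted(changedfiles(ui, repo, 'series.patch')):
        ui.write('%s\n' % f)
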
class GitDiffRequired(Exception):
    pass

def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
                           git=True, whitespace=True, formatchanging=True)

diffopts = diffallopts

def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        if opts:
            v = opts.get(key)
            if v:
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')
    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        buildopts['nobinary'] = get('nobinary')
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**buildopts)

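# Illustrative sketch (not part of the original module): building diff options
# from hypothetical command-line opts. The keys mirror the long option names
# read by get() above; anything unset falls back to the [diff] config section.
def _example_diffopts(ui):
    cliopts = {'git': True, 'show_function': True, 'unified': '5'}
    return difffeatureopts(ui, opts=cliopts, git=True, whitespace=True)
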
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix='', relroot=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.'''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    copy = {}
    if opts.git or opts.upgrade:
        copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)

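# Illustrative sketch (not part of the original module): emitting a git-style
# diff of the working directory against its first parent. diff() yields string
# blocks, one header-plus-hunks group per changed file; 'ui' and 'repo' are
# assumed to be the usual Mercurial objects.
def _example_writediff(ui, repo):
    for chunk in diff(repo, opts=diffallopts(ui, {'git': True})):
        ui.write(chunk)
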
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        for token in tabsplitter.findall(stripline):
                            if '\t' == token[0]:
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                yield (line, '')
            if line != stripline:
                yield (line[len(stripline):], 'diff.trailingwhitespace')

def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    return difflabel(diff, *args, **kw)

2321 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2318 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2322 before and f2 is the the name after. For added files, f1 will be None,
2319 before and f2 is the the name after. For added files, f1 will be None,
2323 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2320 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2324 or 'rename' (the latter two only if opts.git is set).'''
2321 or 'rename' (the latter two only if opts.git is set).'''
2325 gone = set()
2322 gone = set()
2326
2323
2327 copyto = dict([(v, k) for k, v in copy.items()])
2324 copyto = dict([(v, k) for k, v in copy.items()])
2328
2325
2329 addedset, removedset = set(added), set(removed)
2326 addedset, removedset = set(added), set(removed)
2330 # Fix up added, since merged-in additions appear as
2327 # Fix up added, since merged-in additions appear as
2331 # modifications during merges
2328 # modifications during merges
2332 for f in modified:
2329 for f in modified:
2333 if f not in ctx1:
2330 if f not in ctx1:
2334 addedset.add(f)
2331 addedset.add(f)
2335
2332
2336 for f in sorted(modified + added + removed):
2333 for f in sorted(modified + added + removed):
2337 copyop = None
2334 copyop = None
2338 f1, f2 = f, f
2335 f1, f2 = f, f
2339 if f in addedset:
2336 if f in addedset:
2340 f1 = None
2337 f1 = None
2341 if f in copy:
2338 if f in copy:
2342 if opts.git:
2339 if opts.git:
2343 f1 = copy[f]
2340 f1 = copy[f]
2344 if f1 in removedset and f1 not in gone:
2341 if f1 in removedset and f1 not in gone:
2345 copyop = 'rename'
2342 copyop = 'rename'
2346 gone.add(f1)
2343 gone.add(f1)
2347 else:
2344 else:
2348 copyop = 'copy'
2345 copyop = 'copy'
2349 elif f in removedset:
2346 elif f in removedset:
2350 f2 = None
2347 f2 = None
2351 if opts.git:
2348 if opts.git:
2352 # have we already reported a copy above?
2349 # have we already reported a copy above?
2353 if (f in copyto and copyto[f] in addedset
2350 if (f in copyto and copyto[f] in addedset
2354 and copy[copyto[f]] == f):
2351 and copy[copyto[f]] == f):
2355 continue
2352 continue
2356 yield f1, f2, copyop
2353 yield f1, f2, copyop
2357
2354
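# Illustrative sketch (not part of the original module): _filepairs() with a
# hand-built status. ctx1 is only used for membership tests on modified files,
# so a plain set stands in for a changectx here; with git diffs enabled, the
# removed+added pair below is reported as a single rename.
def _example_filepairs():
    opts = mdiff.diffopts(git=True)
    pairs = _filepairs(set(), modified=[], added=['new.txt'],
                       removed=['old.txt'], copy={'new.txt': 'old.txt'},
                       opts=opts)
    return list(pairs)  # -> [('old.txt', 'new.txt', 'rename')]
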
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        if not text:
            text = ""
        l = len(text)
        s = util.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    if relroot != '' and (repo.ui.configbool('devel', 'all')
                          or repo.ui.configbool('devel', 'check-relroot')):
        for f in modified + added + removed + copy.keys() + copy.values():
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(
        ctx1, modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        flag1 = None
        flag2 = None
        if f1:
            content1 = getfilectx(f1, ctx1).data()
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            content2 = getfilectx(f2, ctx2).data()
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        binary = False
        if opts.git or losedatafn:
            binary = util.binary(content1) or util.binary(content2)

        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and not content2) or
                # empty file deletion
                (not content1 and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else: # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
        else:
            text = mdiff.unidiff(content1, date1,
                                 content2, date2,
                                 path1, path2, opts=opts)
        if header and (text or len(header) > 1):
            yield '\n'.join(header) + '\n'
        if text:
            yield text

def diffstatsum(stats):
    maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
    for f, a, r, b in stats:
        maxfile = max(maxfile, encoding.colwidth(f))
        maxtotal = max(maxtotal, a + r)
        addtotal += a
        removetotal += r
        binary = binary or b

    return maxfile, maxtotal, addtotal, removetotal, binary

def diffstatdata(lines):
    diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        if filename:
            results.append((filename, adds, removes, isbinary))

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # set numbers to 0 anyway when starting new file
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++ '):
            adds += 1
        elif line.startswith('-') and not line.startswith('--- '):
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results

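# Illustrative sketch (not part of the original module): feeding a small
# git-style diff to diffstatdata(). Each result is a
# (filename, adds, removes, isbinary) tuple.
def _example_diffstatdata():
    sample = ['diff --git a/foo.txt b/foo.txt',
              '--- a/foo.txt',
              '+++ b/foo.txt',
              '-old line',
              '+new line',
              '+another line']
    return diffstatdata(sample)  # -> [('foo.txt', 2, 1, False)]
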
def diffstat(lines, width=80, git=False):
    output = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        if isbinary:
            count = 'Bin'
        else:
            count = adds + removes
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - encoding.colwidth(filename)),
                       countwidth, count, pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)

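# Illustrative sketch (not part of the original module): rendering a diffstat
# summary for the working directory. diffstat() expects an iterable of lines,
# so the blocks yielded by diff() are joined and split first; 'ui' and 'repo'
# are assumed to be the usual Mercurial objects.
def _example_diffstat(ui, repo):
    chunks = diff(repo, opts=diffallopts(ui, {'git': True}))
    lines = ''.join(chunks).splitlines(True)
    ui.write(diffstat(lines, width=ui.termwidth()))
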
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            m = re.search(r'\++', graph)
            if m:
                yield (m.group(0), 'diffstat.inserted')
            m = re.search(r'-+', graph)
            if m:
                yield (m.group(0), 'diffstat.deleted')
        else:
            yield (line, '')
        yield ('\n', '')
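
# Illustrative sketch (not part of the original module): the labelled variant
# for colorized output, mirroring the diffui() example above; each
# (output, label) pair is passed straight to ui.write().
def _example_colordiffstat(ui, repo):
    chunks = diff(repo, opts=diffallopts(ui, {'git': True}))
    for output, label in diffstatui(''.join(chunks).splitlines(True)):
        ui.write(output, label=label)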