extract: parse 'nodeid' using the generic mechanism...
Pierre-Yves David - r26560:75d448d5 default
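For readers skimming the diff: extract() recognizes '# <Header> ' lines by looking them up in patchheadermap (see the hgpatchheader branch below), rather than giving each header its own elif branch. A minimal, self-contained sketch of that lookup follows; parseheaderline is a hypothetical helper name used only for this illustration, and the node value is a dummy, since extract() itself inlines this loop.

patchheadermap = [('Date', 'date'),
                  ('Branch', 'branch'),
                  ('Node ID', 'nodeid'),
                  ]

def parseheaderline(line, data):
    # Try each known '# <Header> ' prefix; on a match, store the rest of
    # the line under the mapped key and report success.
    for header, key in patchheadermap:
        prefix = '# %s ' % header
        if line.startswith(prefix):
            data[key] = line[len(prefix):]
            return True
    return False

data = {}
parseheaderline('# Node ID 0123456789abcdef', data)
# data is now {'nodeid': '0123456789abcdef'}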
@@ -1,2558 +1,2557 @@
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import collections
9 import collections
10 import cStringIO, email, os, errno, re, posixpath, copy
10 import cStringIO, email, os, errno, re, posixpath, copy
11 import tempfile, zlib, shutil
11 import tempfile, zlib, shutil
12
12
13 from i18n import _
13 from i18n import _
14 from node import hex, short
14 from node import hex, short
15 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
15 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
16 import pathutil
16 import pathutil
17
17
18 gitre = re.compile('diff --git a/(.*) b/(.*)')
18 gitre = re.compile('diff --git a/(.*) b/(.*)')
19 tabsplitter = re.compile(r'(\t+|[^\t]+)')
19 tabsplitter = re.compile(r'(\t+|[^\t]+)')
20
20
21 class PatchError(Exception):
21 class PatchError(Exception):
22 pass
22 pass
23
23
24
24
25 # public functions
25 # public functions
26
26
27 def split(stream):
27 def split(stream):
28 '''return an iterator of individual patches from a stream'''
28 '''return an iterator of individual patches from a stream'''
29 def isheader(line, inheader):
29 def isheader(line, inheader):
30 if inheader and line[0] in (' ', '\t'):
30 if inheader and line[0] in (' ', '\t'):
31 # continuation
31 # continuation
32 return True
32 return True
33 if line[0] in (' ', '-', '+'):
33 if line[0] in (' ', '-', '+'):
34 # diff line - don't check for header pattern in there
34 # diff line - don't check for header pattern in there
35 return False
35 return False
36 l = line.split(': ', 1)
36 l = line.split(': ', 1)
37 return len(l) == 2 and ' ' not in l[0]
37 return len(l) == 2 and ' ' not in l[0]
38
38
39 def chunk(lines):
39 def chunk(lines):
40 return cStringIO.StringIO(''.join(lines))
40 return cStringIO.StringIO(''.join(lines))
41
41
42 def hgsplit(stream, cur):
42 def hgsplit(stream, cur):
43 inheader = True
43 inheader = True
44
44
45 for line in stream:
45 for line in stream:
46 if not line.strip():
46 if not line.strip():
47 inheader = False
47 inheader = False
48 if not inheader and line.startswith('# HG changeset patch'):
48 if not inheader and line.startswith('# HG changeset patch'):
49 yield chunk(cur)
49 yield chunk(cur)
50 cur = []
50 cur = []
51 inheader = True
51 inheader = True
52
52
53 cur.append(line)
53 cur.append(line)
54
54
55 if cur:
55 if cur:
56 yield chunk(cur)
56 yield chunk(cur)
57
57
58 def mboxsplit(stream, cur):
58 def mboxsplit(stream, cur):
59 for line in stream:
59 for line in stream:
60 if line.startswith('From '):
60 if line.startswith('From '):
61 for c in split(chunk(cur[1:])):
61 for c in split(chunk(cur[1:])):
62 yield c
62 yield c
63 cur = []
63 cur = []
64
64
65 cur.append(line)
65 cur.append(line)
66
66
67 if cur:
67 if cur:
68 for c in split(chunk(cur[1:])):
68 for c in split(chunk(cur[1:])):
69 yield c
69 yield c
70
70
71 def mimesplit(stream, cur):
71 def mimesplit(stream, cur):
72 def msgfp(m):
72 def msgfp(m):
73 fp = cStringIO.StringIO()
73 fp = cStringIO.StringIO()
74 g = email.Generator.Generator(fp, mangle_from_=False)
74 g = email.Generator.Generator(fp, mangle_from_=False)
75 g.flatten(m)
75 g.flatten(m)
76 fp.seek(0)
76 fp.seek(0)
77 return fp
77 return fp
78
78
79 for line in stream:
79 for line in stream:
80 cur.append(line)
80 cur.append(line)
81 c = chunk(cur)
81 c = chunk(cur)
82
82
83 m = email.Parser.Parser().parse(c)
83 m = email.Parser.Parser().parse(c)
84 if not m.is_multipart():
84 if not m.is_multipart():
85 yield msgfp(m)
85 yield msgfp(m)
86 else:
86 else:
87 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
87 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
88 for part in m.walk():
88 for part in m.walk():
89 ct = part.get_content_type()
89 ct = part.get_content_type()
90 if ct not in ok_types:
90 if ct not in ok_types:
91 continue
91 continue
92 yield msgfp(part)
92 yield msgfp(part)
93
93
94 def headersplit(stream, cur):
94 def headersplit(stream, cur):
95 inheader = False
95 inheader = False
96
96
97 for line in stream:
97 for line in stream:
98 if not inheader and isheader(line, inheader):
98 if not inheader and isheader(line, inheader):
99 yield chunk(cur)
99 yield chunk(cur)
100 cur = []
100 cur = []
101 inheader = True
101 inheader = True
102 if inheader and not isheader(line, inheader):
102 if inheader and not isheader(line, inheader):
103 inheader = False
103 inheader = False
104
104
105 cur.append(line)
105 cur.append(line)
106
106
107 if cur:
107 if cur:
108 yield chunk(cur)
108 yield chunk(cur)
109
109
110 def remainder(cur):
110 def remainder(cur):
111 yield chunk(cur)
111 yield chunk(cur)
112
112
113 class fiter(object):
113 class fiter(object):
114 def __init__(self, fp):
114 def __init__(self, fp):
115 self.fp = fp
115 self.fp = fp
116
116
117 def __iter__(self):
117 def __iter__(self):
118 return self
118 return self
119
119
120 def next(self):
120 def next(self):
121 l = self.fp.readline()
121 l = self.fp.readline()
122 if not l:
122 if not l:
123 raise StopIteration
123 raise StopIteration
124 return l
124 return l
125
125
126 inheader = False
126 inheader = False
127 cur = []
127 cur = []
128
128
129 mimeheaders = ['content-type']
129 mimeheaders = ['content-type']
130
130
131 if not util.safehasattr(stream, 'next'):
131 if not util.safehasattr(stream, 'next'):
132 # http responses, for example, have readline but not next
132 # http responses, for example, have readline but not next
133 stream = fiter(stream)
133 stream = fiter(stream)
134
134
135 for line in stream:
135 for line in stream:
136 cur.append(line)
136 cur.append(line)
137 if line.startswith('# HG changeset patch'):
137 if line.startswith('# HG changeset patch'):
138 return hgsplit(stream, cur)
138 return hgsplit(stream, cur)
139 elif line.startswith('From '):
139 elif line.startswith('From '):
140 return mboxsplit(stream, cur)
140 return mboxsplit(stream, cur)
141 elif isheader(line, inheader):
141 elif isheader(line, inheader):
142 inheader = True
142 inheader = True
143 if line.split(':', 1)[0].lower() in mimeheaders:
143 if line.split(':', 1)[0].lower() in mimeheaders:
144 # let email parser handle this
144 # let email parser handle this
145 return mimesplit(stream, cur)
145 return mimesplit(stream, cur)
146 elif line.startswith('--- ') and inheader:
146 elif line.startswith('--- ') and inheader:
147 # No evil headers seen by diff start, split by hand
147 # No evil headers seen by diff start, split by hand
148 return headersplit(stream, cur)
148 return headersplit(stream, cur)
149 # Not enough info, keep reading
149 # Not enough info, keep reading
150
150
151 # if we are here, we have a very plain patch
151 # if we are here, we have a very plain patch
152 return remainder(cur)
152 return remainder(cur)
153
153
154 ## Some facility for extensible patch parsing:
154 ## Some facility for extensible patch parsing:
155 # list of pairs ("header to match", "data key")
155 # list of pairs ("header to match", "data key")
156 patchheadermap = [('Date', 'date'),
156 patchheadermap = [('Date', 'date'),
157 ('Branch', 'branch'),
157 ('Branch', 'branch'),
158 ('Node ID', 'nodeid'),
158 ]
159 ]
159
160
160 def extract(ui, fileobj):
161 def extract(ui, fileobj):
161 '''extract patch from data read from fileobj.
162 '''extract patch from data read from fileobj.
162
163
163 patch can be a normal patch or contained in an email message.
164 patch can be a normal patch or contained in an email message.
164
165
165 return a dictionary. Standard keys are:
166 return a dictionary. Standard keys are:
166 - filename,
167 - filename,
167 - message,
168 - message,
168 - user,
169 - user,
169 - date,
170 - date,
170 - branch,
171 - branch,
171 - node,
172 - node,
172 - p1,
173 - p1,
173 - p2.
174 - p2.
174 Any item can be missing from the dictionary. If filename is missing,
175 Any item can be missing from the dictionary. If filename is missing,
175 fileobj did not contain a patch. Caller must unlink filename when done.'''
176 fileobj did not contain a patch. Caller must unlink filename when done.'''
176
177
177 # attempt to detect the start of a patch
178 # attempt to detect the start of a patch
178 # (this heuristic is borrowed from quilt)
179 # (this heuristic is borrowed from quilt)
179 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
180 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
180 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
181 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
181 r'---[ \t].*?^\+\+\+[ \t]|'
182 r'---[ \t].*?^\+\+\+[ \t]|'
182 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
183 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
183
184
184 data = {}
185 data = {}
185 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
186 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
186 tmpfp = os.fdopen(fd, 'w')
187 tmpfp = os.fdopen(fd, 'w')
187 try:
188 try:
188 msg = email.Parser.Parser().parse(fileobj)
189 msg = email.Parser.Parser().parse(fileobj)
189
190
190 subject = msg['Subject']
191 subject = msg['Subject']
191 data['user'] = msg['From']
192 data['user'] = msg['From']
192 if not subject and not data['user']:
193 if not subject and not data['user']:
193 # Not an email, restore parsed headers if any
194 # Not an email, restore parsed headers if any
194 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
195 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
195
196
196 # should try to parse msg['Date']
197 # should try to parse msg['Date']
197 parents = []
198 parents = []
198
199
199 if subject:
200 if subject:
200 if subject.startswith('[PATCH'):
201 if subject.startswith('[PATCH'):
201 pend = subject.find(']')
202 pend = subject.find(']')
202 if pend >= 0:
203 if pend >= 0:
203 subject = subject[pend + 1:].lstrip()
204 subject = subject[pend + 1:].lstrip()
204 subject = re.sub(r'\n[ \t]+', ' ', subject)
205 subject = re.sub(r'\n[ \t]+', ' ', subject)
205 ui.debug('Subject: %s\n' % subject)
206 ui.debug('Subject: %s\n' % subject)
206 if data['user']:
207 if data['user']:
207 ui.debug('From: %s\n' % data['user'])
208 ui.debug('From: %s\n' % data['user'])
208 diffs_seen = 0
209 diffs_seen = 0
209 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
210 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
210 message = ''
211 message = ''
211 for part in msg.walk():
212 for part in msg.walk():
212 content_type = part.get_content_type()
213 content_type = part.get_content_type()
213 ui.debug('Content-Type: %s\n' % content_type)
214 ui.debug('Content-Type: %s\n' % content_type)
214 if content_type not in ok_types:
215 if content_type not in ok_types:
215 continue
216 continue
216 payload = part.get_payload(decode=True)
217 payload = part.get_payload(decode=True)
217 m = diffre.search(payload)
218 m = diffre.search(payload)
218 if m:
219 if m:
219 hgpatch = False
220 hgpatch = False
220 hgpatchheader = False
221 hgpatchheader = False
221 ignoretext = False
222 ignoretext = False
222
223
223 ui.debug('found patch at byte %d\n' % m.start(0))
224 ui.debug('found patch at byte %d\n' % m.start(0))
224 diffs_seen += 1
225 diffs_seen += 1
225 cfp = cStringIO.StringIO()
226 cfp = cStringIO.StringIO()
226 for line in payload[:m.start(0)].splitlines():
227 for line in payload[:m.start(0)].splitlines():
227 if line.startswith('# HG changeset patch') and not hgpatch:
228 if line.startswith('# HG changeset patch') and not hgpatch:
228 ui.debug('patch generated by hg export\n')
229 ui.debug('patch generated by hg export\n')
229 hgpatch = True
230 hgpatch = True
230 hgpatchheader = True
231 hgpatchheader = True
231 # drop earlier commit message content
232 # drop earlier commit message content
232 cfp.seek(0)
233 cfp.seek(0)
233 cfp.truncate()
234 cfp.truncate()
234 subject = None
235 subject = None
235 elif hgpatchheader:
236 elif hgpatchheader:
236 if line.startswith('# User '):
237 if line.startswith('# User '):
237 data['user'] = line[7:]
238 data['user'] = line[7:]
238 ui.debug('From: %s\n' % data['user'])
239 ui.debug('From: %s\n' % data['user'])
239 elif line.startswith("# Node ID "):
240 data['nodeid'] = line[10:]
241 elif line.startswith("# Parent "):
240 elif line.startswith("# Parent "):
242 parents.append(line[9:].lstrip())
241 parents.append(line[9:].lstrip())
243 elif line.startswith("# "):
242 elif line.startswith("# "):
244 for header, key in patchheadermap:
243 for header, key in patchheadermap:
245 prefix = '# %s ' % header
244 prefix = '# %s ' % header
246 if line.startswith(prefix):
245 if line.startswith(prefix):
247 data[key] = line[len(prefix):]
246 data[key] = line[len(prefix):]
248 else:
247 else:
249 hgpatchheader = False
248 hgpatchheader = False
250 elif line == '---':
249 elif line == '---':
251 ignoretext = True
250 ignoretext = True
252 if not hgpatchheader and not ignoretext:
251 if not hgpatchheader and not ignoretext:
253 cfp.write(line)
252 cfp.write(line)
254 cfp.write('\n')
253 cfp.write('\n')
255 message = cfp.getvalue()
254 message = cfp.getvalue()
256 if tmpfp:
255 if tmpfp:
257 tmpfp.write(payload)
256 tmpfp.write(payload)
258 if not payload.endswith('\n'):
257 if not payload.endswith('\n'):
259 tmpfp.write('\n')
258 tmpfp.write('\n')
260 elif not diffs_seen and message and content_type == 'text/plain':
259 elif not diffs_seen and message and content_type == 'text/plain':
261 message += '\n' + payload
260 message += '\n' + payload
262 except: # re-raises
261 except: # re-raises
263 tmpfp.close()
262 tmpfp.close()
264 os.unlink(tmpname)
263 os.unlink(tmpname)
265 raise
264 raise
266
265
267 if subject and not message.startswith(subject):
266 if subject and not message.startswith(subject):
268 message = '%s\n%s' % (subject, message)
267 message = '%s\n%s' % (subject, message)
269 data['message'] = message
268 data['message'] = message
270 tmpfp.close()
269 tmpfp.close()
271 if parents:
270 if parents:
272 data['p1'] = parents.pop(0)
271 data['p1'] = parents.pop(0)
273 if parents:
272 if parents:
274 data['p2'] = parents.pop(0)
273 data['p2'] = parents.pop(0)
275
274
276 if diffs_seen:
275 if diffs_seen:
277 data['filename'] = tmpname
276 data['filename'] = tmpname
278 else:
277 else:
279 os.unlink(tmpname)
278 os.unlink(tmpname)
280 return data
279 return data
281
280
282 class patchmeta(object):
281 class patchmeta(object):
283 """Patched file metadata
282 """Patched file metadata
284
283
285 'op' is the performed operation, one of ADD, DELETE, RENAME, MODIFY
284 'op' is the performed operation, one of ADD, DELETE, RENAME, MODIFY
286 or COPY. 'path' is patched file path. 'oldpath' is set to the
285 or COPY. 'path' is patched file path. 'oldpath' is set to the
287 origin file when 'op' is either COPY or RENAME, None otherwise. If
286 origin file when 'op' is either COPY or RENAME, None otherwise. If
288 file mode is changed, 'mode' is a tuple (islink, isexec) where
287 file mode is changed, 'mode' is a tuple (islink, isexec) where
289 'islink' is True if the file is a symlink and 'isexec' is True if
288 'islink' is True if the file is a symlink and 'isexec' is True if
290 the file is executable. Otherwise, 'mode' is None.
289 the file is executable. Otherwise, 'mode' is None.
291 """
290 """
292 def __init__(self, path):
291 def __init__(self, path):
293 self.path = path
292 self.path = path
294 self.oldpath = None
293 self.oldpath = None
295 self.mode = None
294 self.mode = None
296 self.op = 'MODIFY'
295 self.op = 'MODIFY'
297 self.binary = False
296 self.binary = False
298
297
299 def setmode(self, mode):
298 def setmode(self, mode):
300 islink = mode & 0o20000
299 islink = mode & 0o20000
301 isexec = mode & 0o100
300 isexec = mode & 0o100
302 self.mode = (islink, isexec)
301 self.mode = (islink, isexec)
303
302
304 def copy(self):
303 def copy(self):
305 other = patchmeta(self.path)
304 other = patchmeta(self.path)
306 other.oldpath = self.oldpath
305 other.oldpath = self.oldpath
307 other.mode = self.mode
306 other.mode = self.mode
308 other.op = self.op
307 other.op = self.op
309 other.binary = self.binary
308 other.binary = self.binary
310 return other
309 return other
311
310
312 def _ispatchinga(self, afile):
311 def _ispatchinga(self, afile):
313 if afile == '/dev/null':
312 if afile == '/dev/null':
314 return self.op == 'ADD'
313 return self.op == 'ADD'
315 return afile == 'a/' + (self.oldpath or self.path)
314 return afile == 'a/' + (self.oldpath or self.path)
316
315
317 def _ispatchingb(self, bfile):
316 def _ispatchingb(self, bfile):
318 if bfile == '/dev/null':
317 if bfile == '/dev/null':
319 return self.op == 'DELETE'
318 return self.op == 'DELETE'
320 return bfile == 'b/' + self.path
319 return bfile == 'b/' + self.path
321
320
322 def ispatching(self, afile, bfile):
321 def ispatching(self, afile, bfile):
323 return self._ispatchinga(afile) and self._ispatchingb(bfile)
322 return self._ispatchinga(afile) and self._ispatchingb(bfile)
324
323
325 def __repr__(self):
324 def __repr__(self):
326 return "<patchmeta %s %r>" % (self.op, self.path)
325 return "<patchmeta %s %r>" % (self.op, self.path)
327
326
328 def readgitpatch(lr):
327 def readgitpatch(lr):
329 """extract git-style metadata about patches from <patchname>"""
328 """extract git-style metadata about patches from <patchname>"""
330
329
331 # Filter patch for git information
330 # Filter patch for git information
332 gp = None
331 gp = None
333 gitpatches = []
332 gitpatches = []
334 for line in lr:
333 for line in lr:
335 line = line.rstrip(' \r\n')
334 line = line.rstrip(' \r\n')
336 if line.startswith('diff --git a/'):
335 if line.startswith('diff --git a/'):
337 m = gitre.match(line)
336 m = gitre.match(line)
338 if m:
337 if m:
339 if gp:
338 if gp:
340 gitpatches.append(gp)
339 gitpatches.append(gp)
341 dst = m.group(2)
340 dst = m.group(2)
342 gp = patchmeta(dst)
341 gp = patchmeta(dst)
343 elif gp:
342 elif gp:
344 if line.startswith('--- '):
343 if line.startswith('--- '):
345 gitpatches.append(gp)
344 gitpatches.append(gp)
346 gp = None
345 gp = None
347 continue
346 continue
348 if line.startswith('rename from '):
347 if line.startswith('rename from '):
349 gp.op = 'RENAME'
348 gp.op = 'RENAME'
350 gp.oldpath = line[12:]
349 gp.oldpath = line[12:]
351 elif line.startswith('rename to '):
350 elif line.startswith('rename to '):
352 gp.path = line[10:]
351 gp.path = line[10:]
353 elif line.startswith('copy from '):
352 elif line.startswith('copy from '):
354 gp.op = 'COPY'
353 gp.op = 'COPY'
355 gp.oldpath = line[10:]
354 gp.oldpath = line[10:]
356 elif line.startswith('copy to '):
355 elif line.startswith('copy to '):
357 gp.path = line[8:]
356 gp.path = line[8:]
358 elif line.startswith('deleted file'):
357 elif line.startswith('deleted file'):
359 gp.op = 'DELETE'
358 gp.op = 'DELETE'
360 elif line.startswith('new file mode '):
359 elif line.startswith('new file mode '):
361 gp.op = 'ADD'
360 gp.op = 'ADD'
362 gp.setmode(int(line[-6:], 8))
361 gp.setmode(int(line[-6:], 8))
363 elif line.startswith('new mode '):
362 elif line.startswith('new mode '):
364 gp.setmode(int(line[-6:], 8))
363 gp.setmode(int(line[-6:], 8))
365 elif line.startswith('GIT binary patch'):
364 elif line.startswith('GIT binary patch'):
366 gp.binary = True
365 gp.binary = True
367 if gp:
366 if gp:
368 gitpatches.append(gp)
367 gitpatches.append(gp)
369
368
370 return gitpatches
369 return gitpatches
371
370
372 class linereader(object):
371 class linereader(object):
373 # simple class to allow pushing lines back into the input stream
372 # simple class to allow pushing lines back into the input stream
374 def __init__(self, fp):
373 def __init__(self, fp):
375 self.fp = fp
374 self.fp = fp
376 self.buf = []
375 self.buf = []
377
376
378 def push(self, line):
377 def push(self, line):
379 if line is not None:
378 if line is not None:
380 self.buf.append(line)
379 self.buf.append(line)
381
380
382 def readline(self):
381 def readline(self):
383 if self.buf:
382 if self.buf:
384 l = self.buf[0]
383 l = self.buf[0]
385 del self.buf[0]
384 del self.buf[0]
386 return l
385 return l
387 return self.fp.readline()
386 return self.fp.readline()
388
387
389 def __iter__(self):
388 def __iter__(self):
390 while True:
389 while True:
391 l = self.readline()
390 l = self.readline()
392 if not l:
391 if not l:
393 break
392 break
394 yield l
393 yield l
395
394
396 class abstractbackend(object):
395 class abstractbackend(object):
397 def __init__(self, ui):
396 def __init__(self, ui):
398 self.ui = ui
397 self.ui = ui
399
398
400 def getfile(self, fname):
399 def getfile(self, fname):
401 """Return target file data and flags as a (data, (islink,
400 """Return target file data and flags as a (data, (islink,
402 isexec)) tuple. Data is None if file is missing/deleted.
401 isexec)) tuple. Data is None if file is missing/deleted.
403 """
402 """
404 raise NotImplementedError
403 raise NotImplementedError
405
404
406 def setfile(self, fname, data, mode, copysource):
405 def setfile(self, fname, data, mode, copysource):
407 """Write data to target file fname and set its mode. mode is a
406 """Write data to target file fname and set its mode. mode is a
408 (islink, isexec) tuple. If data is None, the file content should
407 (islink, isexec) tuple. If data is None, the file content should
409 be left unchanged. If the file is modified after being copied,
408 be left unchanged. If the file is modified after being copied,
410 copysource is set to the original file name.
409 copysource is set to the original file name.
411 """
410 """
412 raise NotImplementedError
411 raise NotImplementedError
413
412
414 def unlink(self, fname):
413 def unlink(self, fname):
415 """Unlink target file."""
414 """Unlink target file."""
416 raise NotImplementedError
415 raise NotImplementedError
417
416
418 def writerej(self, fname, failed, total, lines):
417 def writerej(self, fname, failed, total, lines):
419 """Write rejected lines for fname. total is the number of hunks
418 """Write rejected lines for fname. total is the number of hunks
420 which failed to apply and total the total number of hunks for this
419 which failed to apply and total the total number of hunks for this
421 files.
420 files.
422 """
421 """
423 pass
422 pass
424
423
425 def exists(self, fname):
424 def exists(self, fname):
426 raise NotImplementedError
425 raise NotImplementedError
427
426
428 class fsbackend(abstractbackend):
427 class fsbackend(abstractbackend):
429 def __init__(self, ui, basedir):
428 def __init__(self, ui, basedir):
430 super(fsbackend, self).__init__(ui)
429 super(fsbackend, self).__init__(ui)
431 self.opener = scmutil.opener(basedir)
430 self.opener = scmutil.opener(basedir)
432
431
433 def _join(self, f):
432 def _join(self, f):
434 return os.path.join(self.opener.base, f)
433 return os.path.join(self.opener.base, f)
435
434
436 def getfile(self, fname):
435 def getfile(self, fname):
437 if self.opener.islink(fname):
436 if self.opener.islink(fname):
438 return (self.opener.readlink(fname), (True, False))
437 return (self.opener.readlink(fname), (True, False))
439
438
440 isexec = False
439 isexec = False
441 try:
440 try:
442 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
441 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
443 except OSError as e:
442 except OSError as e:
444 if e.errno != errno.ENOENT:
443 if e.errno != errno.ENOENT:
445 raise
444 raise
446 try:
445 try:
447 return (self.opener.read(fname), (False, isexec))
446 return (self.opener.read(fname), (False, isexec))
448 except IOError as e:
447 except IOError as e:
449 if e.errno != errno.ENOENT:
448 if e.errno != errno.ENOENT:
450 raise
449 raise
451 return None, None
450 return None, None
452
451
453 def setfile(self, fname, data, mode, copysource):
452 def setfile(self, fname, data, mode, copysource):
454 islink, isexec = mode
453 islink, isexec = mode
455 if data is None:
454 if data is None:
456 self.opener.setflags(fname, islink, isexec)
455 self.opener.setflags(fname, islink, isexec)
457 return
456 return
458 if islink:
457 if islink:
459 self.opener.symlink(data, fname)
458 self.opener.symlink(data, fname)
460 else:
459 else:
461 self.opener.write(fname, data)
460 self.opener.write(fname, data)
462 if isexec:
461 if isexec:
463 self.opener.setflags(fname, False, True)
462 self.opener.setflags(fname, False, True)
464
463
465 def unlink(self, fname):
464 def unlink(self, fname):
466 self.opener.unlinkpath(fname, ignoremissing=True)
465 self.opener.unlinkpath(fname, ignoremissing=True)
467
466
468 def writerej(self, fname, failed, total, lines):
467 def writerej(self, fname, failed, total, lines):
469 fname = fname + ".rej"
468 fname = fname + ".rej"
470 self.ui.warn(
469 self.ui.warn(
471 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
470 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
472 (failed, total, fname))
471 (failed, total, fname))
473 fp = self.opener(fname, 'w')
472 fp = self.opener(fname, 'w')
474 fp.writelines(lines)
473 fp.writelines(lines)
475 fp.close()
474 fp.close()
476
475
477 def exists(self, fname):
476 def exists(self, fname):
478 return self.opener.lexists(fname)
477 return self.opener.lexists(fname)
479
478
480 class workingbackend(fsbackend):
479 class workingbackend(fsbackend):
481 def __init__(self, ui, repo, similarity):
480 def __init__(self, ui, repo, similarity):
482 super(workingbackend, self).__init__(ui, repo.root)
481 super(workingbackend, self).__init__(ui, repo.root)
483 self.repo = repo
482 self.repo = repo
484 self.similarity = similarity
483 self.similarity = similarity
485 self.removed = set()
484 self.removed = set()
486 self.changed = set()
485 self.changed = set()
487 self.copied = []
486 self.copied = []
488
487
489 def _checkknown(self, fname):
488 def _checkknown(self, fname):
490 if self.repo.dirstate[fname] == '?' and self.exists(fname):
489 if self.repo.dirstate[fname] == '?' and self.exists(fname):
491 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
490 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
492
491
493 def setfile(self, fname, data, mode, copysource):
492 def setfile(self, fname, data, mode, copysource):
494 self._checkknown(fname)
493 self._checkknown(fname)
495 super(workingbackend, self).setfile(fname, data, mode, copysource)
494 super(workingbackend, self).setfile(fname, data, mode, copysource)
496 if copysource is not None:
495 if copysource is not None:
497 self.copied.append((copysource, fname))
496 self.copied.append((copysource, fname))
498 self.changed.add(fname)
497 self.changed.add(fname)
499
498
500 def unlink(self, fname):
499 def unlink(self, fname):
501 self._checkknown(fname)
500 self._checkknown(fname)
502 super(workingbackend, self).unlink(fname)
501 super(workingbackend, self).unlink(fname)
503 self.removed.add(fname)
502 self.removed.add(fname)
504 self.changed.add(fname)
503 self.changed.add(fname)
505
504
506 def close(self):
505 def close(self):
507 wctx = self.repo[None]
506 wctx = self.repo[None]
508 changed = set(self.changed)
507 changed = set(self.changed)
509 for src, dst in self.copied:
508 for src, dst in self.copied:
510 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
509 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
511 if self.removed:
510 if self.removed:
512 wctx.forget(sorted(self.removed))
511 wctx.forget(sorted(self.removed))
513 for f in self.removed:
512 for f in self.removed:
514 if f not in self.repo.dirstate:
513 if f not in self.repo.dirstate:
515 # File was deleted and no longer belongs to the
514 # File was deleted and no longer belongs to the
516 # dirstate, it was probably marked added then
515 # dirstate, it was probably marked added then
517 # deleted, and should not be considered by
516 # deleted, and should not be considered by
518 # marktouched().
517 # marktouched().
519 changed.discard(f)
518 changed.discard(f)
520 if changed:
519 if changed:
521 scmutil.marktouched(self.repo, changed, self.similarity)
520 scmutil.marktouched(self.repo, changed, self.similarity)
522 return sorted(self.changed)
521 return sorted(self.changed)
523
522
524 class filestore(object):
523 class filestore(object):
525 def __init__(self, maxsize=None):
524 def __init__(self, maxsize=None):
526 self.opener = None
525 self.opener = None
527 self.files = {}
526 self.files = {}
528 self.created = 0
527 self.created = 0
529 self.maxsize = maxsize
528 self.maxsize = maxsize
530 if self.maxsize is None:
529 if self.maxsize is None:
531 self.maxsize = 4*(2**20)
530 self.maxsize = 4*(2**20)
532 self.size = 0
531 self.size = 0
533 self.data = {}
532 self.data = {}
534
533
535 def setfile(self, fname, data, mode, copied=None):
534 def setfile(self, fname, data, mode, copied=None):
536 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
535 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
537 self.data[fname] = (data, mode, copied)
536 self.data[fname] = (data, mode, copied)
538 self.size += len(data)
537 self.size += len(data)
539 else:
538 else:
540 if self.opener is None:
539 if self.opener is None:
541 root = tempfile.mkdtemp(prefix='hg-patch-')
540 root = tempfile.mkdtemp(prefix='hg-patch-')
542 self.opener = scmutil.opener(root)
541 self.opener = scmutil.opener(root)
543 # Avoid filename issues with these simple names
542 # Avoid filename issues with these simple names
544 fn = str(self.created)
543 fn = str(self.created)
545 self.opener.write(fn, data)
544 self.opener.write(fn, data)
546 self.created += 1
545 self.created += 1
547 self.files[fname] = (fn, mode, copied)
546 self.files[fname] = (fn, mode, copied)
548
547
549 def getfile(self, fname):
548 def getfile(self, fname):
550 if fname in self.data:
549 if fname in self.data:
551 return self.data[fname]
550 return self.data[fname]
552 if not self.opener or fname not in self.files:
551 if not self.opener or fname not in self.files:
553 return None, None, None
552 return None, None, None
554 fn, mode, copied = self.files[fname]
553 fn, mode, copied = self.files[fname]
555 return self.opener.read(fn), mode, copied
554 return self.opener.read(fn), mode, copied
556
555
557 def close(self):
556 def close(self):
558 if self.opener:
557 if self.opener:
559 shutil.rmtree(self.opener.base)
558 shutil.rmtree(self.opener.base)
560
559
561 class repobackend(abstractbackend):
560 class repobackend(abstractbackend):
562 def __init__(self, ui, repo, ctx, store):
561 def __init__(self, ui, repo, ctx, store):
563 super(repobackend, self).__init__(ui)
562 super(repobackend, self).__init__(ui)
564 self.repo = repo
563 self.repo = repo
565 self.ctx = ctx
564 self.ctx = ctx
566 self.store = store
565 self.store = store
567 self.changed = set()
566 self.changed = set()
568 self.removed = set()
567 self.removed = set()
569 self.copied = {}
568 self.copied = {}
570
569
571 def _checkknown(self, fname):
570 def _checkknown(self, fname):
572 if fname not in self.ctx:
571 if fname not in self.ctx:
573 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
572 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
574
573
575 def getfile(self, fname):
574 def getfile(self, fname):
576 try:
575 try:
577 fctx = self.ctx[fname]
576 fctx = self.ctx[fname]
578 except error.LookupError:
577 except error.LookupError:
579 return None, None
578 return None, None
580 flags = fctx.flags()
579 flags = fctx.flags()
581 return fctx.data(), ('l' in flags, 'x' in flags)
580 return fctx.data(), ('l' in flags, 'x' in flags)
582
581
583 def setfile(self, fname, data, mode, copysource):
582 def setfile(self, fname, data, mode, copysource):
584 if copysource:
583 if copysource:
585 self._checkknown(copysource)
584 self._checkknown(copysource)
586 if data is None:
585 if data is None:
587 data = self.ctx[fname].data()
586 data = self.ctx[fname].data()
588 self.store.setfile(fname, data, mode, copysource)
587 self.store.setfile(fname, data, mode, copysource)
589 self.changed.add(fname)
588 self.changed.add(fname)
590 if copysource:
589 if copysource:
591 self.copied[fname] = copysource
590 self.copied[fname] = copysource
592
591
593 def unlink(self, fname):
592 def unlink(self, fname):
594 self._checkknown(fname)
593 self._checkknown(fname)
595 self.removed.add(fname)
594 self.removed.add(fname)
596
595
597 def exists(self, fname):
596 def exists(self, fname):
598 return fname in self.ctx
597 return fname in self.ctx
599
598
600 def close(self):
599 def close(self):
601 return self.changed | self.removed
600 return self.changed | self.removed
602
601
603 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
602 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
604 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
603 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
605 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
604 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
606 eolmodes = ['strict', 'crlf', 'lf', 'auto']
605 eolmodes = ['strict', 'crlf', 'lf', 'auto']
607
606
608 class patchfile(object):
607 class patchfile(object):
609 def __init__(self, ui, gp, backend, store, eolmode='strict'):
608 def __init__(self, ui, gp, backend, store, eolmode='strict'):
610 self.fname = gp.path
609 self.fname = gp.path
611 self.eolmode = eolmode
610 self.eolmode = eolmode
612 self.eol = None
611 self.eol = None
613 self.backend = backend
612 self.backend = backend
614 self.ui = ui
613 self.ui = ui
615 self.lines = []
614 self.lines = []
616 self.exists = False
615 self.exists = False
617 self.missing = True
616 self.missing = True
618 self.mode = gp.mode
617 self.mode = gp.mode
619 self.copysource = gp.oldpath
618 self.copysource = gp.oldpath
620 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
619 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
621 self.remove = gp.op == 'DELETE'
620 self.remove = gp.op == 'DELETE'
622 if self.copysource is None:
621 if self.copysource is None:
623 data, mode = backend.getfile(self.fname)
622 data, mode = backend.getfile(self.fname)
624 else:
623 else:
625 data, mode = store.getfile(self.copysource)[:2]
624 data, mode = store.getfile(self.copysource)[:2]
626 if data is not None:
625 if data is not None:
627 self.exists = self.copysource is None or backend.exists(self.fname)
626 self.exists = self.copysource is None or backend.exists(self.fname)
628 self.missing = False
627 self.missing = False
629 if data:
628 if data:
630 self.lines = mdiff.splitnewlines(data)
629 self.lines = mdiff.splitnewlines(data)
631 if self.mode is None:
630 if self.mode is None:
632 self.mode = mode
631 self.mode = mode
633 if self.lines:
632 if self.lines:
634 # Normalize line endings
633 # Normalize line endings
635 if self.lines[0].endswith('\r\n'):
634 if self.lines[0].endswith('\r\n'):
636 self.eol = '\r\n'
635 self.eol = '\r\n'
637 elif self.lines[0].endswith('\n'):
636 elif self.lines[0].endswith('\n'):
638 self.eol = '\n'
637 self.eol = '\n'
639 if eolmode != 'strict':
638 if eolmode != 'strict':
640 nlines = []
639 nlines = []
641 for l in self.lines:
640 for l in self.lines:
642 if l.endswith('\r\n'):
641 if l.endswith('\r\n'):
643 l = l[:-2] + '\n'
642 l = l[:-2] + '\n'
644 nlines.append(l)
643 nlines.append(l)
645 self.lines = nlines
644 self.lines = nlines
646 else:
645 else:
647 if self.create:
646 if self.create:
648 self.missing = False
647 self.missing = False
649 if self.mode is None:
648 if self.mode is None:
650 self.mode = (False, False)
649 self.mode = (False, False)
651 if self.missing:
650 if self.missing:
652 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
651 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
653
652
654 self.hash = {}
653 self.hash = {}
655 self.dirty = 0
654 self.dirty = 0
656 self.offset = 0
655 self.offset = 0
657 self.skew = 0
656 self.skew = 0
658 self.rej = []
657 self.rej = []
659 self.fileprinted = False
658 self.fileprinted = False
660 self.printfile(False)
659 self.printfile(False)
661 self.hunks = 0
660 self.hunks = 0
662
661
663 def writelines(self, fname, lines, mode):
662 def writelines(self, fname, lines, mode):
664 if self.eolmode == 'auto':
663 if self.eolmode == 'auto':
665 eol = self.eol
664 eol = self.eol
666 elif self.eolmode == 'crlf':
665 elif self.eolmode == 'crlf':
667 eol = '\r\n'
666 eol = '\r\n'
668 else:
667 else:
669 eol = '\n'
668 eol = '\n'
670
669
671 if self.eolmode != 'strict' and eol and eol != '\n':
670 if self.eolmode != 'strict' and eol and eol != '\n':
672 rawlines = []
671 rawlines = []
673 for l in lines:
672 for l in lines:
674 if l and l[-1] == '\n':
673 if l and l[-1] == '\n':
675 l = l[:-1] + eol
674 l = l[:-1] + eol
676 rawlines.append(l)
675 rawlines.append(l)
677 lines = rawlines
676 lines = rawlines
678
677
679 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
678 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
680
679
681 def printfile(self, warn):
680 def printfile(self, warn):
682 if self.fileprinted:
681 if self.fileprinted:
683 return
682 return
684 if warn or self.ui.verbose:
683 if warn or self.ui.verbose:
685 self.fileprinted = True
684 self.fileprinted = True
686 s = _("patching file %s\n") % self.fname
685 s = _("patching file %s\n") % self.fname
687 if warn:
686 if warn:
688 self.ui.warn(s)
687 self.ui.warn(s)
689 else:
688 else:
690 self.ui.note(s)
689 self.ui.note(s)
691
690
692
691
693 def findlines(self, l, linenum):
692 def findlines(self, l, linenum):
694 # looks through the hash and finds candidate lines. The
693 # looks through the hash and finds candidate lines. The
695 # result is a list of line numbers sorted based on distance
694 # result is a list of line numbers sorted based on distance
696 # from linenum
695 # from linenum
697
696
698 cand = self.hash.get(l, [])
697 cand = self.hash.get(l, [])
699 if len(cand) > 1:
698 if len(cand) > 1:
700 # resort our list of potentials forward then back.
699 # resort our list of potentials forward then back.
701 cand.sort(key=lambda x: abs(x - linenum))
700 cand.sort(key=lambda x: abs(x - linenum))
702 return cand
701 return cand
703
702
704 def write_rej(self):
703 def write_rej(self):
705 # our rejects are a little different from patch(1). This always
704 # our rejects are a little different from patch(1). This always
706 # creates rejects in the same form as the original patch. A file
705 # creates rejects in the same form as the original patch. A file
707 # header is inserted so that you can run the reject through patch again
706 # header is inserted so that you can run the reject through patch again
708 # without having to type the filename.
707 # without having to type the filename.
709 if not self.rej:
708 if not self.rej:
710 return
709 return
711 base = os.path.basename(self.fname)
710 base = os.path.basename(self.fname)
712 lines = ["--- %s\n+++ %s\n" % (base, base)]
711 lines = ["--- %s\n+++ %s\n" % (base, base)]
713 for x in self.rej:
712 for x in self.rej:
714 for l in x.hunk:
713 for l in x.hunk:
715 lines.append(l)
714 lines.append(l)
716 if l[-1] != '\n':
715 if l[-1] != '\n':
717 lines.append("\n\ No newline at end of file\n")
716 lines.append("\n\ No newline at end of file\n")
718 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
717 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
719
718
720 def apply(self, h):
719 def apply(self, h):
721 if not h.complete():
720 if not h.complete():
722 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
721 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
723 (h.number, h.desc, len(h.a), h.lena, len(h.b),
722 (h.number, h.desc, len(h.a), h.lena, len(h.b),
724 h.lenb))
723 h.lenb))
725
724
726 self.hunks += 1
725 self.hunks += 1
727
726
728 if self.missing:
727 if self.missing:
729 self.rej.append(h)
728 self.rej.append(h)
730 return -1
729 return -1
731
730
732 if self.exists and self.create:
731 if self.exists and self.create:
733 if self.copysource:
732 if self.copysource:
734 self.ui.warn(_("cannot create %s: destination already "
733 self.ui.warn(_("cannot create %s: destination already "
735 "exists\n") % self.fname)
734 "exists\n") % self.fname)
736 else:
735 else:
737 self.ui.warn(_("file %s already exists\n") % self.fname)
736 self.ui.warn(_("file %s already exists\n") % self.fname)
738 self.rej.append(h)
737 self.rej.append(h)
739 return -1
738 return -1
740
739
741 if isinstance(h, binhunk):
740 if isinstance(h, binhunk):
742 if self.remove:
741 if self.remove:
743 self.backend.unlink(self.fname)
742 self.backend.unlink(self.fname)
744 else:
743 else:
745 l = h.new(self.lines)
744 l = h.new(self.lines)
746 self.lines[:] = l
745 self.lines[:] = l
747 self.offset += len(l)
746 self.offset += len(l)
748 self.dirty = True
747 self.dirty = True
749 return 0
748 return 0
750
749
751 horig = h
750 horig = h
752 if (self.eolmode in ('crlf', 'lf')
751 if (self.eolmode in ('crlf', 'lf')
753 or self.eolmode == 'auto' and self.eol):
752 or self.eolmode == 'auto' and self.eol):
754 # If new eols are going to be normalized, then normalize
753 # If new eols are going to be normalized, then normalize
755 # hunk data before patching. Otherwise, preserve input
754 # hunk data before patching. Otherwise, preserve input
756 # line-endings.
755 # line-endings.
757 h = h.getnormalized()
756 h = h.getnormalized()
758
757
759 # fast case first, no offsets, no fuzz
758 # fast case first, no offsets, no fuzz
760 old, oldstart, new, newstart = h.fuzzit(0, False)
759 old, oldstart, new, newstart = h.fuzzit(0, False)
761 oldstart += self.offset
760 oldstart += self.offset
762 orig_start = oldstart
761 orig_start = oldstart
763 # if there's skew we want to emit the "(offset %d lines)" even
762 # if there's skew we want to emit the "(offset %d lines)" even
764 # when the hunk cleanly applies at start + skew, so skip the
763 # when the hunk cleanly applies at start + skew, so skip the
765 # fast case code
764 # fast case code
766 if (self.skew == 0 and
765 if (self.skew == 0 and
767 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
766 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
768 if self.remove:
767 if self.remove:
769 self.backend.unlink(self.fname)
768 self.backend.unlink(self.fname)
770 else:
769 else:
771 self.lines[oldstart:oldstart + len(old)] = new
770 self.lines[oldstart:oldstart + len(old)] = new
772 self.offset += len(new) - len(old)
771 self.offset += len(new) - len(old)
773 self.dirty = True
772 self.dirty = True
774 return 0
773 return 0
775
774
776 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
775 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
777 self.hash = {}
776 self.hash = {}
778 for x, s in enumerate(self.lines):
777 for x, s in enumerate(self.lines):
779 self.hash.setdefault(s, []).append(x)
778 self.hash.setdefault(s, []).append(x)
780
779
781 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
780 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
782 for toponly in [True, False]:
781 for toponly in [True, False]:
783 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
782 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
784 oldstart = oldstart + self.offset + self.skew
783 oldstart = oldstart + self.offset + self.skew
785 oldstart = min(oldstart, len(self.lines))
784 oldstart = min(oldstart, len(self.lines))
786 if old:
785 if old:
787 cand = self.findlines(old[0][1:], oldstart)
786 cand = self.findlines(old[0][1:], oldstart)
788 else:
787 else:
789 # Only adding lines with no or fuzzed context, just
788 # Only adding lines with no or fuzzed context, just
790 # take the skew in account
789 # take the skew in account
791 cand = [oldstart]
790 cand = [oldstart]
792
791
793 for l in cand:
792 for l in cand:
794 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
793 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
795 self.lines[l : l + len(old)] = new
794 self.lines[l : l + len(old)] = new
796 self.offset += len(new) - len(old)
795 self.offset += len(new) - len(old)
797 self.skew = l - orig_start
796 self.skew = l - orig_start
798 self.dirty = True
797 self.dirty = True
799 offset = l - orig_start - fuzzlen
798 offset = l - orig_start - fuzzlen
800 if fuzzlen:
799 if fuzzlen:
801 msg = _("Hunk #%d succeeded at %d "
800 msg = _("Hunk #%d succeeded at %d "
802 "with fuzz %d "
801 "with fuzz %d "
803 "(offset %d lines).\n")
802 "(offset %d lines).\n")
804 self.printfile(True)
803 self.printfile(True)
805 self.ui.warn(msg %
804 self.ui.warn(msg %
806 (h.number, l + 1, fuzzlen, offset))
805 (h.number, l + 1, fuzzlen, offset))
807 else:
806 else:
808 msg = _("Hunk #%d succeeded at %d "
807 msg = _("Hunk #%d succeeded at %d "
809 "(offset %d lines).\n")
808 "(offset %d lines).\n")
810 self.ui.note(msg % (h.number, l + 1, offset))
809 self.ui.note(msg % (h.number, l + 1, offset))
811 return fuzzlen
810 return fuzzlen
812 self.printfile(True)
811 self.printfile(True)
813 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
812 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
814 self.rej.append(horig)
813 self.rej.append(horig)
815 return -1
814 return -1
816
815
817 def close(self):
816 def close(self):
818 if self.dirty:
817 if self.dirty:
819 self.writelines(self.fname, self.lines, self.mode)
818 self.writelines(self.fname, self.lines, self.mode)
820 self.write_rej()
819 self.write_rej()
821 return len(self.rej)
820 return len(self.rej)
822
821
823 class header(object):
822 class header(object):
824 """patch header
823 """patch header
825 """
824 """
826 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
825 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
827 diff_re = re.compile('diff -r .* (.*)$')
826 diff_re = re.compile('diff -r .* (.*)$')
828 allhunks_re = re.compile('(?:index|deleted file) ')
827 allhunks_re = re.compile('(?:index|deleted file) ')
829 pretty_re = re.compile('(?:new file|deleted file) ')
828 pretty_re = re.compile('(?:new file|deleted file) ')
830 special_re = re.compile('(?:index|deleted|copy|rename) ')
829 special_re = re.compile('(?:index|deleted|copy|rename) ')
831 newfile_re = re.compile('(?:new file)')
830 newfile_re = re.compile('(?:new file)')
832
831
833 def __init__(self, header):
832 def __init__(self, header):
834 self.header = header
833 self.header = header
835 self.hunks = []
834 self.hunks = []
836
835
837 def binary(self):
836 def binary(self):
838 return any(h.startswith('index ') for h in self.header)
837 return any(h.startswith('index ') for h in self.header)
839
838
840 def pretty(self, fp):
839 def pretty(self, fp):
841 for h in self.header:
840 for h in self.header:
842 if h.startswith('index '):
841 if h.startswith('index '):
843 fp.write(_('this modifies a binary file (all or nothing)\n'))
842 fp.write(_('this modifies a binary file (all or nothing)\n'))
844 break
843 break
845 if self.pretty_re.match(h):
844 if self.pretty_re.match(h):
846 fp.write(h)
845 fp.write(h)
847 if self.binary():
846 if self.binary():
848 fp.write(_('this is a binary file\n'))
847 fp.write(_('this is a binary file\n'))
849 break
848 break
850 if h.startswith('---'):
849 if h.startswith('---'):
851 fp.write(_('%d hunks, %d lines changed\n') %
850 fp.write(_('%d hunks, %d lines changed\n') %
852 (len(self.hunks),
851 (len(self.hunks),
853 sum([max(h.added, h.removed) for h in self.hunks])))
852 sum([max(h.added, h.removed) for h in self.hunks])))
854 break
853 break
855 fp.write(h)
854 fp.write(h)
856
855
857 def write(self, fp):
856 def write(self, fp):
858 fp.write(''.join(self.header))
857 fp.write(''.join(self.header))
859
858
860 def allhunks(self):
859 def allhunks(self):
861 return any(self.allhunks_re.match(h) for h in self.header)
860 return any(self.allhunks_re.match(h) for h in self.header)
862
861
863 def files(self):
862 def files(self):
864 match = self.diffgit_re.match(self.header[0])
863 match = self.diffgit_re.match(self.header[0])
865 if match:
864 if match:
866 fromfile, tofile = match.groups()
865 fromfile, tofile = match.groups()
867 if fromfile == tofile:
866 if fromfile == tofile:
868 return [fromfile]
867 return [fromfile]
869 return [fromfile, tofile]
868 return [fromfile, tofile]
870 else:
869 else:
871 return self.diff_re.match(self.header[0]).groups()
870 return self.diff_re.match(self.header[0]).groups()
872
871
873 def filename(self):
872 def filename(self):
874 return self.files()[-1]
873 return self.files()[-1]
875
874
876 def __repr__(self):
875 def __repr__(self):
877 return '<header %s>' % (' '.join(map(repr, self.files())))
876 return '<header %s>' % (' '.join(map(repr, self.files())))
878
877
879 def isnewfile(self):
878 def isnewfile(self):
880 return any(self.newfile_re.match(h) for h in self.header)
879 return any(self.newfile_re.match(h) for h in self.header)
881
880
882 def special(self):
881 def special(self):
883 # Special files are shown only at the header level and not at the hunk
882 # Special files are shown only at the header level and not at the hunk
884 # level for example a file that has been deleted is a special file.
883 # level for example a file that has been deleted is a special file.
885 # The user cannot change the content of the operation, in the case of
884 # The user cannot change the content of the operation, in the case of
886 # the deleted file he has to take the deletion or not take it, he
885 # the deleted file he has to take the deletion or not take it, he
887 # cannot take some of it.
886 # cannot take some of it.
888 # Newly added files are special if they are empty, they are not special
887 # Newly added files are special if they are empty, they are not special
889 # if they have some content as we want to be able to change it
888 # if they have some content as we want to be able to change it
890 nocontent = len(self.header) == 2
889 nocontent = len(self.header) == 2
891 emptynewfile = self.isnewfile() and nocontent
890 emptynewfile = self.isnewfile() and nocontent
892 return emptynewfile or \
891 return emptynewfile or \
893 any(self.special_re.match(h) for h in self.header)
892 any(self.special_re.match(h) for h in self.header)
894
893
895 class recordhunk(object):
894 class recordhunk(object):
896 """patch hunk
895 """patch hunk
897
896
898 XXX shouldn't we merge this with the other hunk class?
897 XXX shouldn't we merge this with the other hunk class?
899 """
898 """
900 maxcontext = 3
899 maxcontext = 3
901
900
902 def __init__(self, header, fromline, toline, proc, before, hunk, after):
901 def __init__(self, header, fromline, toline, proc, before, hunk, after):
903 def trimcontext(number, lines):
902 def trimcontext(number, lines):
904 delta = len(lines) - self.maxcontext
903 delta = len(lines) - self.maxcontext
905 if False and delta > 0:
904 if False and delta > 0:
906 return number + delta, lines[:self.maxcontext]
905 return number + delta, lines[:self.maxcontext]
907 return number, lines
906 return number, lines
908
907
909 self.header = header
908 self.header = header
910 self.fromline, self.before = trimcontext(fromline, before)
909 self.fromline, self.before = trimcontext(fromline, before)
911 self.toline, self.after = trimcontext(toline, after)
910 self.toline, self.after = trimcontext(toline, after)
912 self.proc = proc
911 self.proc = proc
913 self.hunk = hunk
912 self.hunk = hunk
914 self.added, self.removed = self.countchanges(self.hunk)
913 self.added, self.removed = self.countchanges(self.hunk)
915
914
916 def __eq__(self, v):
915 def __eq__(self, v):
917 if not isinstance(v, recordhunk):
916 if not isinstance(v, recordhunk):
918 return False
917 return False
919
918
920 return ((v.hunk == self.hunk) and
919 return ((v.hunk == self.hunk) and
921 (v.proc == self.proc) and
920 (v.proc == self.proc) and
922 (self.fromline == v.fromline) and
921 (self.fromline == v.fromline) and
923 (self.header.files() == v.header.files()))
922 (self.header.files() == v.header.files()))
924
923
925 def __hash__(self):
924 def __hash__(self):
926 return hash((tuple(self.hunk),
925 return hash((tuple(self.hunk),
927 tuple(self.header.files()),
926 tuple(self.header.files()),
928 self.fromline,
927 self.fromline,
929 self.proc))
928 self.proc))
930
929
931 def countchanges(self, hunk):
930 def countchanges(self, hunk):
932 """hunk -> (n+,n-)"""
931 """hunk -> (n+,n-)"""
933 add = len([h for h in hunk if h[0] == '+'])
932 add = len([h for h in hunk if h[0] == '+'])
934 rem = len([h for h in hunk if h[0] == '-'])
933 rem = len([h for h in hunk if h[0] == '-'])
935 return add, rem
934 return add, rem
936
935
937 def write(self, fp):
936 def write(self, fp):
938 delta = len(self.before) + len(self.after)
937 delta = len(self.before) + len(self.after)
939 if self.after and self.after[-1] == '\\ No newline at end of file\n':
938 if self.after and self.after[-1] == '\\ No newline at end of file\n':
940 delta -= 1
939 delta -= 1
941 fromlen = delta + self.removed
940 fromlen = delta + self.removed
942 tolen = delta + self.added
941 tolen = delta + self.added
943 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
942 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
944 (self.fromline, fromlen, self.toline, tolen,
943 (self.fromline, fromlen, self.toline, tolen,
945 self.proc and (' ' + self.proc)))
944 self.proc and (' ' + self.proc)))
946 fp.write(''.join(self.before + self.hunk + self.after))
945 fp.write(''.join(self.before + self.hunk + self.after))
947
946
948 pretty = write
947 pretty = write
949
948
950 def filename(self):
949 def filename(self):
951 return self.header.filename()
950 return self.header.filename()
952
951
953 def __repr__(self):
952 def __repr__(self):
954 return '<hunk %r@%d>' % (self.filename(), self.fromline)
953 return '<hunk %r@%d>' % (self.filename(), self.fromline)
955
954
956 def filterpatch(ui, headers, operation=None):
955 def filterpatch(ui, headers, operation=None):
957 """Interactively filter patch chunks into applied-only chunks"""
956 """Interactively filter patch chunks into applied-only chunks"""
958 if operation is None:
957 if operation is None:
959 operation = _('record')
958 operation = _('record')
960
959
961 def prompt(skipfile, skipall, query, chunk):
960 def prompt(skipfile, skipall, query, chunk):
962 """prompt query, and process base inputs
961 """prompt query, and process base inputs
963
962
964 - y/n for the rest of file
963 - y/n for the rest of file
965 - y/n for the rest
964 - y/n for the rest
966 - ? (help)
965 - ? (help)
967 - q (quit)
966 - q (quit)
968
967
969 Return True/False and possibly updated skipfile and skipall.
968 Return True/False and possibly updated skipfile and skipall.
970 """
969 """
971 newpatches = None
970 newpatches = None
972 if skipall is not None:
971 if skipall is not None:
973 return skipall, skipfile, skipall, newpatches
972 return skipall, skipfile, skipall, newpatches
974 if skipfile is not None:
973 if skipfile is not None:
975 return skipfile, skipfile, skipall, newpatches
974 return skipfile, skipfile, skipall, newpatches
976 while True:
975 while True:
977 resps = _('[Ynesfdaq?]'
976 resps = _('[Ynesfdaq?]'
978 '$$ &Yes, record this change'
977 '$$ &Yes, record this change'
979 '$$ &No, skip this change'
978 '$$ &No, skip this change'
980 '$$ &Edit this change manually'
979 '$$ &Edit this change manually'
981 '$$ &Skip remaining changes to this file'
980 '$$ &Skip remaining changes to this file'
982 '$$ Record remaining changes to this &file'
981 '$$ Record remaining changes to this &file'
983 '$$ &Done, skip remaining changes and files'
982 '$$ &Done, skip remaining changes and files'
984 '$$ Record &all changes to all remaining files'
983 '$$ Record &all changes to all remaining files'
985 '$$ &Quit, recording no changes'
984 '$$ &Quit, recording no changes'
986 '$$ &? (display help)')
985 '$$ &? (display help)')
987 r = ui.promptchoice("%s %s" % (query, resps))
986 r = ui.promptchoice("%s %s" % (query, resps))
988 ui.write("\n")
987 ui.write("\n")
989 if r == 8: # ?
988 if r == 8: # ?
990 for c, t in ui.extractchoices(resps)[1]:
989 for c, t in ui.extractchoices(resps)[1]:
991 ui.write('%s - %s\n' % (c, t.lower()))
990 ui.write('%s - %s\n' % (c, t.lower()))
992 continue
991 continue
993 elif r == 0: # yes
992 elif r == 0: # yes
994 ret = True
993 ret = True
995 elif r == 1: # no
994 elif r == 1: # no
996 ret = False
995 ret = False
997 elif r == 2: # Edit patch
996 elif r == 2: # Edit patch
998 if chunk is None:
997 if chunk is None:
999 ui.write(_('cannot edit patch for whole file'))
998 ui.write(_('cannot edit patch for whole file'))
1000 ui.write("\n")
999 ui.write("\n")
1001 continue
1000 continue
1002 if chunk.header.binary():
1001 if chunk.header.binary():
1003 ui.write(_('cannot edit patch for binary file'))
1002 ui.write(_('cannot edit patch for binary file'))
1004 ui.write("\n")
1003 ui.write("\n")
1005 continue
1004 continue
1006 # Patch comment based on the Git one (based on comment at end of
1005 # Patch comment based on the Git one (based on comment at end of
1007 # https://mercurial-scm.org/wiki/RecordExtension)
1006 # https://mercurial-scm.org/wiki/RecordExtension)
1008 phelp = '---' + _("""
1007 phelp = '---' + _("""
1009 To remove '-' lines, make them ' ' lines (context).
1008 To remove '-' lines, make them ' ' lines (context).
1010 To remove '+' lines, delete them.
1009 To remove '+' lines, delete them.
1011 Lines starting with # will be removed from the patch.
1010 Lines starting with # will be removed from the patch.
1012
1011
1013 If the patch applies cleanly, the edited hunk will immediately be
1012 If the patch applies cleanly, the edited hunk will immediately be
1014 added to the record list. If it does not apply cleanly, a rejects
1013 added to the record list. If it does not apply cleanly, a rejects
1015 file will be generated: you can use that when you try again. If
1014 file will be generated: you can use that when you try again. If
1016 all lines of the hunk are removed, then the edit is aborted and
1015 all lines of the hunk are removed, then the edit is aborted and
1017 the hunk is left unchanged.
1016 the hunk is left unchanged.
1018 """)
1017 """)
1019 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1018 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1020 suffix=".diff", text=True)
1019 suffix=".diff", text=True)
1021 ncpatchfp = None
1020 ncpatchfp = None
1022 try:
1021 try:
1023 # Write the initial patch
1022 # Write the initial patch
1024 f = os.fdopen(patchfd, "w")
1023 f = os.fdopen(patchfd, "w")
1025 chunk.header.write(f)
1024 chunk.header.write(f)
1026 chunk.write(f)
1025 chunk.write(f)
1027 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1026 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1028 f.close()
1027 f.close()
1029 # Start the editor and wait for it to complete
1028 # Start the editor and wait for it to complete
1030 editor = ui.geteditor()
1029 editor = ui.geteditor()
1031 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1030 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1032 environ={'HGUSER': ui.username()})
1031 environ={'HGUSER': ui.username()})
1033 if ret != 0:
1032 if ret != 0:
1034 ui.warn(_("editor exited with exit code %d\n") % ret)
1033 ui.warn(_("editor exited with exit code %d\n") % ret)
1035 continue
1034 continue
1036 # Remove comment lines
1035 # Remove comment lines
1037 patchfp = open(patchfn)
1036 patchfp = open(patchfn)
1038 ncpatchfp = cStringIO.StringIO()
1037 ncpatchfp = cStringIO.StringIO()
1039 for line in patchfp:
1038 for line in patchfp:
1040 if not line.startswith('#'):
1039 if not line.startswith('#'):
1041 ncpatchfp.write(line)
1040 ncpatchfp.write(line)
1042 patchfp.close()
1041 patchfp.close()
1043 ncpatchfp.seek(0)
1042 ncpatchfp.seek(0)
1044 newpatches = parsepatch(ncpatchfp)
1043 newpatches = parsepatch(ncpatchfp)
1045 finally:
1044 finally:
1046 os.unlink(patchfn)
1045 os.unlink(patchfn)
1047 del ncpatchfp
1046 del ncpatchfp
1048 # Signal that the chunk shouldn't be applied as-is, but
1047 # Signal that the chunk shouldn't be applied as-is, but
1049 # provide the new patch to be used instead.
1048 # provide the new patch to be used instead.
1050 ret = False
1049 ret = False
1051 elif r == 3: # Skip
1050 elif r == 3: # Skip
1052 ret = skipfile = False
1051 ret = skipfile = False
1053 elif r == 4: # file (Record remaining)
1052 elif r == 4: # file (Record remaining)
1054 ret = skipfile = True
1053 ret = skipfile = True
1055 elif r == 5: # done, skip remaining
1054 elif r == 5: # done, skip remaining
1056 ret = skipall = False
1055 ret = skipall = False
1057 elif r == 6: # all
1056 elif r == 6: # all
1058 ret = skipall = True
1057 ret = skipall = True
1059 elif r == 7: # quit
1058 elif r == 7: # quit
1060 raise util.Abort(_('user quit'))
1059 raise util.Abort(_('user quit'))
1061 return ret, skipfile, skipall, newpatches
1060 return ret, skipfile, skipall, newpatches
1062
1061
1063 seen = set()
1062 seen = set()
1064 applied = {} # 'filename' -> [] of chunks
1063 applied = {} # 'filename' -> [] of chunks
1065 skipfile, skipall = None, None
1064 skipfile, skipall = None, None
1066 pos, total = 1, sum(len(h.hunks) for h in headers)
1065 pos, total = 1, sum(len(h.hunks) for h in headers)
1067 for h in headers:
1066 for h in headers:
1068 pos += len(h.hunks)
1067 pos += len(h.hunks)
1069 skipfile = None
1068 skipfile = None
1070 fixoffset = 0
1069 fixoffset = 0
1071 hdr = ''.join(h.header)
1070 hdr = ''.join(h.header)
1072 if hdr in seen:
1071 if hdr in seen:
1073 continue
1072 continue
1074 seen.add(hdr)
1073 seen.add(hdr)
1075 if skipall is None:
1074 if skipall is None:
1076 h.pretty(ui)
1075 h.pretty(ui)
1077 msg = (_('examine changes to %s?') %
1076 msg = (_('examine changes to %s?') %
1078 _(' and ').join("'%s'" % f for f in h.files()))
1077 _(' and ').join("'%s'" % f for f in h.files()))
1079 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1078 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1080 if not r:
1079 if not r:
1081 continue
1080 continue
1082 applied[h.filename()] = [h]
1081 applied[h.filename()] = [h]
1083 if h.allhunks():
1082 if h.allhunks():
1084 applied[h.filename()] += h.hunks
1083 applied[h.filename()] += h.hunks
1085 continue
1084 continue
1086 for i, chunk in enumerate(h.hunks):
1085 for i, chunk in enumerate(h.hunks):
1087 if skipfile is None and skipall is None:
1086 if skipfile is None and skipall is None:
1088 chunk.pretty(ui)
1087 chunk.pretty(ui)
1089 if total == 1:
1088 if total == 1:
1090 msg = _("record this change to '%s'?") % chunk.filename()
1089 msg = _("record this change to '%s'?") % chunk.filename()
1091 else:
1090 else:
1092 idx = pos - len(h.hunks) + i
1091 idx = pos - len(h.hunks) + i
1093 msg = _("record change %d/%d to '%s'?") % (idx, total,
1092 msg = _("record change %d/%d to '%s'?") % (idx, total,
1094 chunk.filename())
1093 chunk.filename())
1095 r, skipfile, skipall, newpatches = prompt(skipfile,
1094 r, skipfile, skipall, newpatches = prompt(skipfile,
1096 skipall, msg, chunk)
1095 skipall, msg, chunk)
1097 if r:
1096 if r:
1098 if fixoffset:
1097 if fixoffset:
1099 chunk = copy.copy(chunk)
1098 chunk = copy.copy(chunk)
1100 chunk.toline += fixoffset
1099 chunk.toline += fixoffset
1101 applied[chunk.filename()].append(chunk)
1100 applied[chunk.filename()].append(chunk)
1102 elif newpatches is not None:
1101 elif newpatches is not None:
1103 for newpatch in newpatches:
1102 for newpatch in newpatches:
1104 for newhunk in newpatch.hunks:
1103 for newhunk in newpatch.hunks:
1105 if fixoffset:
1104 if fixoffset:
1106 newhunk.toline += fixoffset
1105 newhunk.toline += fixoffset
1107 applied[newhunk.filename()].append(newhunk)
1106 applied[newhunk.filename()].append(newhunk)
1108 else:
1107 else:
1109 fixoffset += chunk.removed - chunk.added
1108 fixoffset += chunk.removed - chunk.added
1110 return sum([h for h in applied.itervalues()
1109 return sum([h for h in applied.itervalues()
1111 if h[0].special() or len(h) > 1], [])
1110 if h[0].special() or len(h) > 1], [])
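# Editorial note (not part of the original file): the value returned above is
# a flat list interleaving each selected header with the hunks recorded for
# it, for example:
#
#   [<header 'a.c'>, <hunk a.c@1>, <hunk a.c@40>, <header 'b.c'>, <hunk b.c@7>]
#
# Entries that ended up containing only their header are dropped unless
# header.special() marks the header alone as meaningful (e.g. binary files or
# metadata-only changes).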
1112 class hunk(object):
1111 class hunk(object):
1113 def __init__(self, desc, num, lr, context):
1112 def __init__(self, desc, num, lr, context):
1114 self.number = num
1113 self.number = num
1115 self.desc = desc
1114 self.desc = desc
1116 self.hunk = [desc]
1115 self.hunk = [desc]
1117 self.a = []
1116 self.a = []
1118 self.b = []
1117 self.b = []
1119 self.starta = self.lena = None
1118 self.starta = self.lena = None
1120 self.startb = self.lenb = None
1119 self.startb = self.lenb = None
1121 if lr is not None:
1120 if lr is not None:
1122 if context:
1121 if context:
1123 self.read_context_hunk(lr)
1122 self.read_context_hunk(lr)
1124 else:
1123 else:
1125 self.read_unified_hunk(lr)
1124 self.read_unified_hunk(lr)
1126
1125
1127 def getnormalized(self):
1126 def getnormalized(self):
1128 """Return a copy with line endings normalized to LF."""
1127 """Return a copy with line endings normalized to LF."""
1129
1128
1130 def normalize(lines):
1129 def normalize(lines):
1131 nlines = []
1130 nlines = []
1132 for line in lines:
1131 for line in lines:
1133 if line.endswith('\r\n'):
1132 if line.endswith('\r\n'):
1134 line = line[:-2] + '\n'
1133 line = line[:-2] + '\n'
1135 nlines.append(line)
1134 nlines.append(line)
1136 return nlines
1135 return nlines
1137
1136
1138 # Dummy object; it is rebuilt manually
1137 # Dummy object; it is rebuilt manually
1139 nh = hunk(self.desc, self.number, None, None)
1138 nh = hunk(self.desc, self.number, None, None)
1140 nh.number = self.number
1139 nh.number = self.number
1141 nh.desc = self.desc
1140 nh.desc = self.desc
1142 nh.hunk = self.hunk
1141 nh.hunk = self.hunk
1143 nh.a = normalize(self.a)
1142 nh.a = normalize(self.a)
1144 nh.b = normalize(self.b)
1143 nh.b = normalize(self.b)
1145 nh.starta = self.starta
1144 nh.starta = self.starta
1146 nh.startb = self.startb
1145 nh.startb = self.startb
1147 nh.lena = self.lena
1146 nh.lena = self.lena
1148 nh.lenb = self.lenb
1147 nh.lenb = self.lenb
1149 return nh
1148 return nh
1150
1149
1151 def read_unified_hunk(self, lr):
1150 def read_unified_hunk(self, lr):
1152 m = unidesc.match(self.desc)
1151 m = unidesc.match(self.desc)
1153 if not m:
1152 if not m:
1154 raise PatchError(_("bad hunk #%d") % self.number)
1153 raise PatchError(_("bad hunk #%d") % self.number)
1155 self.starta, self.lena, self.startb, self.lenb = m.groups()
1154 self.starta, self.lena, self.startb, self.lenb = m.groups()
1156 if self.lena is None:
1155 if self.lena is None:
1157 self.lena = 1
1156 self.lena = 1
1158 else:
1157 else:
1159 self.lena = int(self.lena)
1158 self.lena = int(self.lena)
1160 if self.lenb is None:
1159 if self.lenb is None:
1161 self.lenb = 1
1160 self.lenb = 1
1162 else:
1161 else:
1163 self.lenb = int(self.lenb)
1162 self.lenb = int(self.lenb)
1164 self.starta = int(self.starta)
1163 self.starta = int(self.starta)
1165 self.startb = int(self.startb)
1164 self.startb = int(self.startb)
1166 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1165 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1167 self.b)
1166 self.b)
1168 # if we hit EOF before finishing out the hunk, the last line will
1167 # if we hit EOF before finishing out the hunk, the last line will
1169 # be zero length. Let's try to fix it up.
1168 # be zero length. Let's try to fix it up.
1170 while len(self.hunk[-1]) == 0:
1169 while len(self.hunk[-1]) == 0:
1171 del self.hunk[-1]
1170 del self.hunk[-1]
1172 del self.a[-1]
1171 del self.a[-1]
1173 del self.b[-1]
1172 del self.b[-1]
1174 self.lena -= 1
1173 self.lena -= 1
1175 self.lenb -= 1
1174 self.lenb -= 1
1176 self._fixnewline(lr)
1175 self._fixnewline(lr)
1177
1176
1178 def read_context_hunk(self, lr):
1177 def read_context_hunk(self, lr):
1179 self.desc = lr.readline()
1178 self.desc = lr.readline()
1180 m = contextdesc.match(self.desc)
1179 m = contextdesc.match(self.desc)
1181 if not m:
1180 if not m:
1182 raise PatchError(_("bad hunk #%d") % self.number)
1181 raise PatchError(_("bad hunk #%d") % self.number)
1183 self.starta, aend = m.groups()
1182 self.starta, aend = m.groups()
1184 self.starta = int(self.starta)
1183 self.starta = int(self.starta)
1185 if aend is None:
1184 if aend is None:
1186 aend = self.starta
1185 aend = self.starta
1187 self.lena = int(aend) - self.starta
1186 self.lena = int(aend) - self.starta
1188 if self.starta:
1187 if self.starta:
1189 self.lena += 1
1188 self.lena += 1
1190 for x in xrange(self.lena):
1189 for x in xrange(self.lena):
1191 l = lr.readline()
1190 l = lr.readline()
1192 if l.startswith('---'):
1191 if l.startswith('---'):
1193 # lines addition, old block is empty
1192 # lines addition, old block is empty
1194 lr.push(l)
1193 lr.push(l)
1195 break
1194 break
1196 s = l[2:]
1195 s = l[2:]
1197 if l.startswith('- ') or l.startswith('! '):
1196 if l.startswith('- ') or l.startswith('! '):
1198 u = '-' + s
1197 u = '-' + s
1199 elif l.startswith(' '):
1198 elif l.startswith(' '):
1200 u = ' ' + s
1199 u = ' ' + s
1201 else:
1200 else:
1202 raise PatchError(_("bad hunk #%d old text line %d") %
1201 raise PatchError(_("bad hunk #%d old text line %d") %
1203 (self.number, x))
1202 (self.number, x))
1204 self.a.append(u)
1203 self.a.append(u)
1205 self.hunk.append(u)
1204 self.hunk.append(u)
1206
1205
1207 l = lr.readline()
1206 l = lr.readline()
1208 if l.startswith('\ '):
1207 if l.startswith('\ '):
1209 s = self.a[-1][:-1]
1208 s = self.a[-1][:-1]
1210 self.a[-1] = s
1209 self.a[-1] = s
1211 self.hunk[-1] = s
1210 self.hunk[-1] = s
1212 l = lr.readline()
1211 l = lr.readline()
1213 m = contextdesc.match(l)
1212 m = contextdesc.match(l)
1214 if not m:
1213 if not m:
1215 raise PatchError(_("bad hunk #%d") % self.number)
1214 raise PatchError(_("bad hunk #%d") % self.number)
1216 self.startb, bend = m.groups()
1215 self.startb, bend = m.groups()
1217 self.startb = int(self.startb)
1216 self.startb = int(self.startb)
1218 if bend is None:
1217 if bend is None:
1219 bend = self.startb
1218 bend = self.startb
1220 self.lenb = int(bend) - self.startb
1219 self.lenb = int(bend) - self.startb
1221 if self.startb:
1220 if self.startb:
1222 self.lenb += 1
1221 self.lenb += 1
1223 hunki = 1
1222 hunki = 1
1224 for x in xrange(self.lenb):
1223 for x in xrange(self.lenb):
1225 l = lr.readline()
1224 l = lr.readline()
1226 if l.startswith('\ '):
1225 if l.startswith('\ '):
1227 # XXX: the only way to hit this is with an invalid line range.
1226 # XXX: the only way to hit this is with an invalid line range.
1228 # The no-eol marker is not counted in the line range, but I
1227 # The no-eol marker is not counted in the line range, but I
1229 # guess there are diff(1) implementations out there which behave differently.
1228 # guess there are diff(1) implementations out there which behave differently.
1230 s = self.b[-1][:-1]
1229 s = self.b[-1][:-1]
1231 self.b[-1] = s
1230 self.b[-1] = s
1232 self.hunk[hunki - 1] = s
1231 self.hunk[hunki - 1] = s
1233 continue
1232 continue
1234 if not l:
1233 if not l:
1235 # line deletions, new block is empty and we hit EOF
1234 # line deletions, new block is empty and we hit EOF
1236 lr.push(l)
1235 lr.push(l)
1237 break
1236 break
1238 s = l[2:]
1237 s = l[2:]
1239 if l.startswith('+ ') or l.startswith('! '):
1238 if l.startswith('+ ') or l.startswith('! '):
1240 u = '+' + s
1239 u = '+' + s
1241 elif l.startswith(' '):
1240 elif l.startswith(' '):
1242 u = ' ' + s
1241 u = ' ' + s
1243 elif len(self.b) == 0:
1242 elif len(self.b) == 0:
1244 # line deletions, new block is empty
1243 # line deletions, new block is empty
1245 lr.push(l)
1244 lr.push(l)
1246 break
1245 break
1247 else:
1246 else:
1248 raise PatchError(_("bad hunk #%d old text line %d") %
1247 raise PatchError(_("bad hunk #%d old text line %d") %
1249 (self.number, x))
1248 (self.number, x))
1250 self.b.append(s)
1249 self.b.append(s)
1251 while True:
1250 while True:
1252 if hunki >= len(self.hunk):
1251 if hunki >= len(self.hunk):
1253 h = ""
1252 h = ""
1254 else:
1253 else:
1255 h = self.hunk[hunki]
1254 h = self.hunk[hunki]
1256 hunki += 1
1255 hunki += 1
1257 if h == u:
1256 if h == u:
1258 break
1257 break
1259 elif h.startswith('-'):
1258 elif h.startswith('-'):
1260 continue
1259 continue
1261 else:
1260 else:
1262 self.hunk.insert(hunki - 1, u)
1261 self.hunk.insert(hunki - 1, u)
1263 break
1262 break
1264
1263
1265 if not self.a:
1264 if not self.a:
1266 # this happens when lines were only added to the hunk
1265 # this happens when lines were only added to the hunk
1267 for x in self.hunk:
1266 for x in self.hunk:
1268 if x.startswith('-') or x.startswith(' '):
1267 if x.startswith('-') or x.startswith(' '):
1269 self.a.append(x)
1268 self.a.append(x)
1270 if not self.b:
1269 if not self.b:
1271 # this happens when lines were only deleted from the hunk
1270 # this happens when lines were only deleted from the hunk
1272 for x in self.hunk:
1271 for x in self.hunk:
1273 if x.startswith('+') or x.startswith(' '):
1272 if x.startswith('+') or x.startswith(' '):
1274 self.b.append(x[1:])
1273 self.b.append(x[1:])
1275 # @@ -start,len +start,len @@
1274 # @@ -start,len +start,len @@
1276 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1275 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1277 self.startb, self.lenb)
1276 self.startb, self.lenb)
1278 self.hunk[0] = self.desc
1277 self.hunk[0] = self.desc
1279 self._fixnewline(lr)
1278 self._fixnewline(lr)
1280
1279
1281 def _fixnewline(self, lr):
1280 def _fixnewline(self, lr):
1282 l = lr.readline()
1281 l = lr.readline()
1283 if l.startswith('\ '):
1282 if l.startswith('\ '):
1284 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1283 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1285 else:
1284 else:
1286 lr.push(l)
1285 lr.push(l)
1287
1286
1288 def complete(self):
1287 def complete(self):
1289 return len(self.a) == self.lena and len(self.b) == self.lenb
1288 return len(self.a) == self.lena and len(self.b) == self.lenb
1290
1289
1291 def _fuzzit(self, old, new, fuzz, toponly):
1290 def _fuzzit(self, old, new, fuzz, toponly):
1292 # this removes context lines from the top and bottom of the 'old'/'new' lists. It
1291 # this removes context lines from the top and bottom of the 'old'/'new' lists. It
1293 # checks the hunk to make sure only context lines are removed, and then
1292 # checks the hunk to make sure only context lines are removed, and then
1294 # returns a new shortened list of lines.
1293 # returns a new shortened list of lines.
1295 fuzz = min(fuzz, len(old))
1294 fuzz = min(fuzz, len(old))
1296 if fuzz:
1295 if fuzz:
1297 top = 0
1296 top = 0
1298 bot = 0
1297 bot = 0
1299 hlen = len(self.hunk)
1298 hlen = len(self.hunk)
1300 for x in xrange(hlen - 1):
1299 for x in xrange(hlen - 1):
1301 # the hunk starts with the @@ line, so use x+1
1300 # the hunk starts with the @@ line, so use x+1
1302 if self.hunk[x + 1][0] == ' ':
1301 if self.hunk[x + 1][0] == ' ':
1303 top += 1
1302 top += 1
1304 else:
1303 else:
1305 break
1304 break
1306 if not toponly:
1305 if not toponly:
1307 for x in xrange(hlen - 1):
1306 for x in xrange(hlen - 1):
1308 if self.hunk[hlen - bot - 1][0] == ' ':
1307 if self.hunk[hlen - bot - 1][0] == ' ':
1309 bot += 1
1308 bot += 1
1310 else:
1309 else:
1311 break
1310 break
1312
1311
1313 bot = min(fuzz, bot)
1312 bot = min(fuzz, bot)
1314 top = min(fuzz, top)
1313 top = min(fuzz, top)
1315 return old[top:len(old) - bot], new[top:len(new) - bot], top
1314 return old[top:len(old) - bot], new[top:len(new) - bot], top
1316 return old, new, 0
1315 return old, new, 0
1317
1316
1318 def fuzzit(self, fuzz, toponly):
1317 def fuzzit(self, fuzz, toponly):
1319 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1318 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1320 oldstart = self.starta + top
1319 oldstart = self.starta + top
1321 newstart = self.startb + top
1320 newstart = self.startb + top
1322 # zero length hunk ranges already have their start decremented
1321 # zero length hunk ranges already have their start decremented
1323 if self.lena and oldstart > 0:
1322 if self.lena and oldstart > 0:
1324 oldstart -= 1
1323 oldstart -= 1
1325 if self.lenb and newstart > 0:
1324 if self.lenb and newstart > 0:
1326 newstart -= 1
1325 newstart -= 1
1327 return old, oldstart, new, newstart
1326 return old, oldstart, new, newstart
1328
1327
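# Editorial note (not part of the original file): with fuzz=2, for instance,
# fuzzit() may trim up to two leading context lines (and, unless toponly is
# set, up to two trailing ones) from self.a/self.b, shifting the reported
# start offsets by the number of lines trimmed from the top. This mirrors the
# "fuzz factor" behaviour of patch(1): a hunk whose surrounding context moved
# slightly can still be applied, at the cost of precision.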
1329 class binhunk(object):
1328 class binhunk(object):
1330 'A binary patch file.'
1329 'A binary patch file.'
1331 def __init__(self, lr, fname):
1330 def __init__(self, lr, fname):
1332 self.text = None
1331 self.text = None
1333 self.delta = False
1332 self.delta = False
1334 self.hunk = ['GIT binary patch\n']
1333 self.hunk = ['GIT binary patch\n']
1335 self._fname = fname
1334 self._fname = fname
1336 self._read(lr)
1335 self._read(lr)
1337
1336
1338 def complete(self):
1337 def complete(self):
1339 return self.text is not None
1338 return self.text is not None
1340
1339
1341 def new(self, lines):
1340 def new(self, lines):
1342 if self.delta:
1341 if self.delta:
1343 return [applybindelta(self.text, ''.join(lines))]
1342 return [applybindelta(self.text, ''.join(lines))]
1344 return [self.text]
1343 return [self.text]
1345
1344
1346 def _read(self, lr):
1345 def _read(self, lr):
1347 def getline(lr, hunk):
1346 def getline(lr, hunk):
1348 l = lr.readline()
1347 l = lr.readline()
1349 hunk.append(l)
1348 hunk.append(l)
1350 return l.rstrip('\r\n')
1349 return l.rstrip('\r\n')
1351
1350
1352 size = 0
1351 size = 0
1353 while True:
1352 while True:
1354 line = getline(lr, self.hunk)
1353 line = getline(lr, self.hunk)
1355 if not line:
1354 if not line:
1356 raise PatchError(_('could not extract "%s" binary data')
1355 raise PatchError(_('could not extract "%s" binary data')
1357 % self._fname)
1356 % self._fname)
1358 if line.startswith('literal '):
1357 if line.startswith('literal '):
1359 size = int(line[8:].rstrip())
1358 size = int(line[8:].rstrip())
1360 break
1359 break
1361 if line.startswith('delta '):
1360 if line.startswith('delta '):
1362 size = int(line[6:].rstrip())
1361 size = int(line[6:].rstrip())
1363 self.delta = True
1362 self.delta = True
1364 break
1363 break
1365 dec = []
1364 dec = []
1366 line = getline(lr, self.hunk)
1365 line = getline(lr, self.hunk)
1367 while len(line) > 1:
1366 while len(line) > 1:
1368 l = line[0]
1367 l = line[0]
1369 if l <= 'Z' and l >= 'A':
1368 if l <= 'Z' and l >= 'A':
1370 l = ord(l) - ord('A') + 1
1369 l = ord(l) - ord('A') + 1
1371 else:
1370 else:
1372 l = ord(l) - ord('a') + 27
1371 l = ord(l) - ord('a') + 27
1373 try:
1372 try:
1374 dec.append(base85.b85decode(line[1:])[:l])
1373 dec.append(base85.b85decode(line[1:])[:l])
1375 except ValueError as e:
1374 except ValueError as e:
1376 raise PatchError(_('could not decode "%s" binary patch: %s')
1375 raise PatchError(_('could not decode "%s" binary patch: %s')
1377 % (self._fname, str(e)))
1376 % (self._fname, str(e)))
1378 line = getline(lr, self.hunk)
1377 line = getline(lr, self.hunk)
1379 text = zlib.decompress(''.join(dec))
1378 text = zlib.decompress(''.join(dec))
1380 if len(text) != size:
1379 if len(text) != size:
1381 raise PatchError(_('"%s" length is %d bytes, should be %d')
1380 raise PatchError(_('"%s" length is %d bytes, should be %d')
1382 % (self._fname, len(text), size))
1381 % (self._fname, len(text), size))
1383 self.text = text
1382 self.text = text
1384
1383
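# Editorial sketch (not part of the original file): the data consumed by
# binhunk._read() is the body of a "GIT binary patch" section, roughly:
#
#   literal 13
#   M<base85 payload>        (placeholder, not real base85 text)
#
# The leading letter of each data line encodes how many decoded bytes that
# line carries ('A'-'Z' -> 1-26, 'a'-'z' -> 27-52, so 'M' means 13 bytes);
# the rest of the line is base85-encoded data. The concatenated payload is
# zlib-decompressed and its length checked against the declared size.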
1385 def parsefilename(str):
1384 def parsefilename(str):
1386 # --- filename \t|space stuff
1385 # --- filename \t|space stuff
1387 s = str[4:].rstrip('\r\n')
1386 s = str[4:].rstrip('\r\n')
1388 i = s.find('\t')
1387 i = s.find('\t')
1389 if i < 0:
1388 if i < 0:
1390 i = s.find(' ')
1389 i = s.find(' ')
1391 if i < 0:
1390 if i < 0:
1392 return s
1391 return s
1393 return s[:i]
1392 return s[:i]
1394
1393
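# Editorial example (not part of the original file): parsefilename() drops
# the "--- "/"+++ " marker and keeps the path up to the first tab (or,
# failing that, the first space):
#
#   parsefilename('--- a/foo.c\tThu Jan 01 00:00:00 1970\n')  -> 'a/foo.c'
#   parsefilename('+++ b/foo.c\n')                            -> 'b/foo.c'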
1395 def reversehunks(hunks):
1394 def reversehunks(hunks):
1396 '''reverse the signs in the hunks given as argument
1395 '''reverse the signs in the hunks given as argument
1397
1396
1398 This function operates on hunks coming out of patch.filterpatch, that is
1397 This function operates on hunks coming out of patch.filterpatch, that is
1399 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1398 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1400
1399
1401 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1400 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1402 ... --- a/folder1/g
1401 ... --- a/folder1/g
1403 ... +++ b/folder1/g
1402 ... +++ b/folder1/g
1404 ... @@ -1,7 +1,7 @@
1403 ... @@ -1,7 +1,7 @@
1405 ... +firstline
1404 ... +firstline
1406 ... c
1405 ... c
1407 ... 1
1406 ... 1
1408 ... 2
1407 ... 2
1409 ... + 3
1408 ... + 3
1410 ... -4
1409 ... -4
1411 ... 5
1410 ... 5
1412 ... d
1411 ... d
1413 ... +lastline"""
1412 ... +lastline"""
1414 >>> hunks = parsepatch(rawpatch)
1413 >>> hunks = parsepatch(rawpatch)
1415 >>> hunkscomingfromfilterpatch = []
1414 >>> hunkscomingfromfilterpatch = []
1416 >>> for h in hunks:
1415 >>> for h in hunks:
1417 ... hunkscomingfromfilterpatch.append(h)
1416 ... hunkscomingfromfilterpatch.append(h)
1418 ... hunkscomingfromfilterpatch.extend(h.hunks)
1417 ... hunkscomingfromfilterpatch.extend(h.hunks)
1419
1418
1420 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1419 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1421 >>> fp = cStringIO.StringIO()
1420 >>> fp = cStringIO.StringIO()
1422 >>> for c in reversedhunks:
1421 >>> for c in reversedhunks:
1423 ... c.write(fp)
1422 ... c.write(fp)
1424 >>> fp.seek(0)
1423 >>> fp.seek(0)
1425 >>> reversedpatch = fp.read()
1424 >>> reversedpatch = fp.read()
1426 >>> print reversedpatch
1425 >>> print reversedpatch
1427 diff --git a/folder1/g b/folder1/g
1426 diff --git a/folder1/g b/folder1/g
1428 --- a/folder1/g
1427 --- a/folder1/g
1429 +++ b/folder1/g
1428 +++ b/folder1/g
1430 @@ -1,4 +1,3 @@
1429 @@ -1,4 +1,3 @@
1431 -firstline
1430 -firstline
1432 c
1431 c
1433 1
1432 1
1434 2
1433 2
1435 @@ -1,6 +2,6 @@
1434 @@ -1,6 +2,6 @@
1436 c
1435 c
1437 1
1436 1
1438 2
1437 2
1439 - 3
1438 - 3
1440 +4
1439 +4
1441 5
1440 5
1442 d
1441 d
1443 @@ -5,3 +6,2 @@
1442 @@ -5,3 +6,2 @@
1444 5
1443 5
1445 d
1444 d
1446 -lastline
1445 -lastline
1447
1446
1448 '''
1447 '''
1449
1448
1450 import crecord as crecordmod
1449 import crecord as crecordmod
1451 newhunks = []
1450 newhunks = []
1452 for c in hunks:
1451 for c in hunks:
1453 if isinstance(c, crecordmod.uihunk):
1452 if isinstance(c, crecordmod.uihunk):
1454 # curses hunks encapsulate the record hunk in _hunk
1453 # curses hunks encapsulate the record hunk in _hunk
1455 c = c._hunk
1454 c = c._hunk
1456 if isinstance(c, recordhunk):
1455 if isinstance(c, recordhunk):
1457 for j, line in enumerate(c.hunk):
1456 for j, line in enumerate(c.hunk):
1458 if line.startswith("-"):
1457 if line.startswith("-"):
1459 c.hunk[j] = "+" + c.hunk[j][1:]
1458 c.hunk[j] = "+" + c.hunk[j][1:]
1460 elif line.startswith("+"):
1459 elif line.startswith("+"):
1461 c.hunk[j] = "-" + c.hunk[j][1:]
1460 c.hunk[j] = "-" + c.hunk[j][1:]
1462 c.added, c.removed = c.removed, c.added
1461 c.added, c.removed = c.removed, c.added
1463 newhunks.append(c)
1462 newhunks.append(c)
1464 return newhunks
1463 return newhunks
1465
1464
1466 def parsepatch(originalchunks):
1465 def parsepatch(originalchunks):
1467 """patch -> [] of headers -> [] of hunks """
1466 """patch -> [] of headers -> [] of hunks """
1468 class parser(object):
1467 class parser(object):
1469 """patch parsing state machine"""
1468 """patch parsing state machine"""
1470 def __init__(self):
1469 def __init__(self):
1471 self.fromline = 0
1470 self.fromline = 0
1472 self.toline = 0
1471 self.toline = 0
1473 self.proc = ''
1472 self.proc = ''
1474 self.header = None
1473 self.header = None
1475 self.context = []
1474 self.context = []
1476 self.before = []
1475 self.before = []
1477 self.hunk = []
1476 self.hunk = []
1478 self.headers = []
1477 self.headers = []
1479
1478
1480 def addrange(self, limits):
1479 def addrange(self, limits):
1481 fromstart, fromend, tostart, toend, proc = limits
1480 fromstart, fromend, tostart, toend, proc = limits
1482 self.fromline = int(fromstart)
1481 self.fromline = int(fromstart)
1483 self.toline = int(tostart)
1482 self.toline = int(tostart)
1484 self.proc = proc
1483 self.proc = proc
1485
1484
1486 def addcontext(self, context):
1485 def addcontext(self, context):
1487 if self.hunk:
1486 if self.hunk:
1488 h = recordhunk(self.header, self.fromline, self.toline,
1487 h = recordhunk(self.header, self.fromline, self.toline,
1489 self.proc, self.before, self.hunk, context)
1488 self.proc, self.before, self.hunk, context)
1490 self.header.hunks.append(h)
1489 self.header.hunks.append(h)
1491 self.fromline += len(self.before) + h.removed
1490 self.fromline += len(self.before) + h.removed
1492 self.toline += len(self.before) + h.added
1491 self.toline += len(self.before) + h.added
1493 self.before = []
1492 self.before = []
1494 self.hunk = []
1493 self.hunk = []
1495 self.proc = ''
1494 self.proc = ''
1496 self.context = context
1495 self.context = context
1497
1496
1498 def addhunk(self, hunk):
1497 def addhunk(self, hunk):
1499 if self.context:
1498 if self.context:
1500 self.before = self.context
1499 self.before = self.context
1501 self.context = []
1500 self.context = []
1502 self.hunk = hunk
1501 self.hunk = hunk
1503
1502
1504 def newfile(self, hdr):
1503 def newfile(self, hdr):
1505 self.addcontext([])
1504 self.addcontext([])
1506 h = header(hdr)
1505 h = header(hdr)
1507 self.headers.append(h)
1506 self.headers.append(h)
1508 self.header = h
1507 self.header = h
1509
1508
1510 def addother(self, line):
1509 def addother(self, line):
1511 pass # 'other' lines are ignored
1510 pass # 'other' lines are ignored
1512
1511
1513 def finished(self):
1512 def finished(self):
1514 self.addcontext([])
1513 self.addcontext([])
1515 return self.headers
1514 return self.headers
1516
1515
1517 transitions = {
1516 transitions = {
1518 'file': {'context': addcontext,
1517 'file': {'context': addcontext,
1519 'file': newfile,
1518 'file': newfile,
1520 'hunk': addhunk,
1519 'hunk': addhunk,
1521 'range': addrange},
1520 'range': addrange},
1522 'context': {'file': newfile,
1521 'context': {'file': newfile,
1523 'hunk': addhunk,
1522 'hunk': addhunk,
1524 'range': addrange,
1523 'range': addrange,
1525 'other': addother},
1524 'other': addother},
1526 'hunk': {'context': addcontext,
1525 'hunk': {'context': addcontext,
1527 'file': newfile,
1526 'file': newfile,
1528 'range': addrange},
1527 'range': addrange},
1529 'range': {'context': addcontext,
1528 'range': {'context': addcontext,
1530 'hunk': addhunk},
1529 'hunk': addhunk},
1531 'other': {'other': addother},
1530 'other': {'other': addother},
1532 }
1531 }
1533
1532
1534 p = parser()
1533 p = parser()
1535 fp = cStringIO.StringIO()
1534 fp = cStringIO.StringIO()
1536 fp.write(''.join(originalchunks))
1535 fp.write(''.join(originalchunks))
1537 fp.seek(0)
1536 fp.seek(0)
1538
1537
1539 state = 'context'
1538 state = 'context'
1540 for newstate, data in scanpatch(fp):
1539 for newstate, data in scanpatch(fp):
1541 try:
1540 try:
1542 p.transitions[state][newstate](p, data)
1541 p.transitions[state][newstate](p, data)
1543 except KeyError:
1542 except KeyError:
1544 raise PatchError('unhandled transition: %s -> %s' %
1543 raise PatchError('unhandled transition: %s -> %s' %
1545 (state, newstate))
1544 (state, newstate))
1546 state = newstate
1545 state = newstate
1547 del fp
1546 del fp
1548 return p.finished()
1547 return p.finished()
1549
1548
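# Editorial sketch (not part of the original file): for a one-file, one-hunk
# git diff, parsepatch() returns a single header object whose .hunks list
# holds one recordhunk, e.g.:
#
#   headers = parsepatch(chunks)
#   headers[0].files()                    # ['folder1/g']
#   headers[0].hunks[0].fromline          # start line on the old side
#
# These headers (and their hunks) are the material that filterpatch() and
# reversehunks() above operate on.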
1550 def pathtransform(path, strip, prefix):
1549 def pathtransform(path, strip, prefix):
1551 '''turn a path from a patch into a path suitable for the repository
1550 '''turn a path from a patch into a path suitable for the repository
1552
1551
1553 prefix, if not empty, is expected to be normalized with a / at the end.
1552 prefix, if not empty, is expected to be normalized with a / at the end.
1554
1553
1555 Returns (stripped components, path in repository).
1554 Returns (stripped components, path in repository).
1556
1555
1557 >>> pathtransform('a/b/c', 0, '')
1556 >>> pathtransform('a/b/c', 0, '')
1558 ('', 'a/b/c')
1557 ('', 'a/b/c')
1559 >>> pathtransform(' a/b/c ', 0, '')
1558 >>> pathtransform(' a/b/c ', 0, '')
1560 ('', ' a/b/c')
1559 ('', ' a/b/c')
1561 >>> pathtransform(' a/b/c ', 2, '')
1560 >>> pathtransform(' a/b/c ', 2, '')
1562 ('a/b/', 'c')
1561 ('a/b/', 'c')
1563 >>> pathtransform('a/b/c', 0, 'd/e/')
1562 >>> pathtransform('a/b/c', 0, 'd/e/')
1564 ('', 'd/e/a/b/c')
1563 ('', 'd/e/a/b/c')
1565 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1564 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1566 ('a//b/', 'd/e/c')
1565 ('a//b/', 'd/e/c')
1567 >>> pathtransform('a/b/c', 3, '')
1566 >>> pathtransform('a/b/c', 3, '')
1568 Traceback (most recent call last):
1567 Traceback (most recent call last):
1569 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1568 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1570 '''
1569 '''
1571 pathlen = len(path)
1570 pathlen = len(path)
1572 i = 0
1571 i = 0
1573 if strip == 0:
1572 if strip == 0:
1574 return '', prefix + path.rstrip()
1573 return '', prefix + path.rstrip()
1575 count = strip
1574 count = strip
1576 while count > 0:
1575 while count > 0:
1577 i = path.find('/', i)
1576 i = path.find('/', i)
1578 if i == -1:
1577 if i == -1:
1579 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1578 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1580 (count, strip, path))
1579 (count, strip, path))
1581 i += 1
1580 i += 1
1582 # consume '//' in the path
1581 # consume '//' in the path
1583 while i < pathlen - 1 and path[i] == '/':
1582 while i < pathlen - 1 and path[i] == '/':
1584 i += 1
1583 i += 1
1585 count -= 1
1584 count -= 1
1586 return path[:i].lstrip(), prefix + path[i:].rstrip()
1585 return path[:i].lstrip(), prefix + path[i:].rstrip()
1587
1586
1588 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1587 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1589 nulla = afile_orig == "/dev/null"
1588 nulla = afile_orig == "/dev/null"
1590 nullb = bfile_orig == "/dev/null"
1589 nullb = bfile_orig == "/dev/null"
1591 create = nulla and hunk.starta == 0 and hunk.lena == 0
1590 create = nulla and hunk.starta == 0 and hunk.lena == 0
1592 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1591 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1593 abase, afile = pathtransform(afile_orig, strip, prefix)
1592 abase, afile = pathtransform(afile_orig, strip, prefix)
1594 gooda = not nulla and backend.exists(afile)
1593 gooda = not nulla and backend.exists(afile)
1595 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1594 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1596 if afile == bfile:
1595 if afile == bfile:
1597 goodb = gooda
1596 goodb = gooda
1598 else:
1597 else:
1599 goodb = not nullb and backend.exists(bfile)
1598 goodb = not nullb and backend.exists(bfile)
1600 missing = not goodb and not gooda and not create
1599 missing = not goodb and not gooda and not create
1601
1600
1602 # some diff programs apparently produce patches where the afile is
1601 # some diff programs apparently produce patches where the afile is
1603 # not /dev/null, but afile starts with bfile
1602 # not /dev/null, but afile starts with bfile
1604 abasedir = afile[:afile.rfind('/') + 1]
1603 abasedir = afile[:afile.rfind('/') + 1]
1605 bbasedir = bfile[:bfile.rfind('/') + 1]
1604 bbasedir = bfile[:bfile.rfind('/') + 1]
1606 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1605 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1607 and hunk.starta == 0 and hunk.lena == 0):
1606 and hunk.starta == 0 and hunk.lena == 0):
1608 create = True
1607 create = True
1609 missing = False
1608 missing = False
1610
1609
1611 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1610 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1612 # diff is between a file and its backup. In this case, the original
1611 # diff is between a file and its backup. In this case, the original
1613 # file should be patched (see original mpatch code).
1612 # file should be patched (see original mpatch code).
1614 isbackup = (abase == bbase and bfile.startswith(afile))
1613 isbackup = (abase == bbase and bfile.startswith(afile))
1615 fname = None
1614 fname = None
1616 if not missing:
1615 if not missing:
1617 if gooda and goodb:
1616 if gooda and goodb:
1618 if isbackup:
1617 if isbackup:
1619 fname = afile
1618 fname = afile
1620 else:
1619 else:
1621 fname = bfile
1620 fname = bfile
1622 elif gooda:
1621 elif gooda:
1623 fname = afile
1622 fname = afile
1624
1623
1625 if not fname:
1624 if not fname:
1626 if not nullb:
1625 if not nullb:
1627 if isbackup:
1626 if isbackup:
1628 fname = afile
1627 fname = afile
1629 else:
1628 else:
1630 fname = bfile
1629 fname = bfile
1631 elif not nulla:
1630 elif not nulla:
1632 fname = afile
1631 fname = afile
1633 else:
1632 else:
1634 raise PatchError(_("undefined source and destination files"))
1633 raise PatchError(_("undefined source and destination files"))
1635
1634
1636 gp = patchmeta(fname)
1635 gp = patchmeta(fname)
1637 if create:
1636 if create:
1638 gp.op = 'ADD'
1637 gp.op = 'ADD'
1639 elif remove:
1638 elif remove:
1640 gp.op = 'DELETE'
1639 gp.op = 'DELETE'
1641 return gp
1640 return gp
1642
1641
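# Editorial example (not part of the original file): the create/remove
# detection above keys off /dev/null combined with an empty source or target
# range. For a hunk introduced by
#
#   --- /dev/null
#   +++ b/newfile.txt
#   @@ -0,0 +1,3 @@
#
# afile_orig is "/dev/null" and hunk.starta == hunk.lena == 0, so the
# resulting patchmeta gets op = 'ADD'; the mirror-image case ("+++ /dev/null"
# with an empty target range) yields op = 'DELETE'.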
1643 def scanpatch(fp):
1642 def scanpatch(fp):
1644 """like patch.iterhunks, but yield different events
1643 """like patch.iterhunks, but yield different events
1645
1644
1646 - ('file', [header_lines + fromfile + tofile])
1645 - ('file', [header_lines + fromfile + tofile])
1647 - ('context', [context_lines])
1646 - ('context', [context_lines])
1648 - ('hunk', [hunk_lines])
1647 - ('hunk', [hunk_lines])
1649 - ('range', (-start,len, +start,len, proc))
1648 - ('range', (-start,len, +start,len, proc))
1650 """
1649 """
1651 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1650 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1652 lr = linereader(fp)
1651 lr = linereader(fp)
1653
1652
1654 def scanwhile(first, p):
1653 def scanwhile(first, p):
1655 """scan lr while predicate holds"""
1654 """scan lr while predicate holds"""
1656 lines = [first]
1655 lines = [first]
1657 while True:
1656 while True:
1658 line = lr.readline()
1657 line = lr.readline()
1659 if not line:
1658 if not line:
1660 break
1659 break
1661 if p(line):
1660 if p(line):
1662 lines.append(line)
1661 lines.append(line)
1663 else:
1662 else:
1664 lr.push(line)
1663 lr.push(line)
1665 break
1664 break
1666 return lines
1665 return lines
1667
1666
1668 while True:
1667 while True:
1669 line = lr.readline()
1668 line = lr.readline()
1670 if not line:
1669 if not line:
1671 break
1670 break
1672 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1671 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1673 def notheader(line):
1672 def notheader(line):
1674 s = line.split(None, 1)
1673 s = line.split(None, 1)
1675 return not s or s[0] not in ('---', 'diff')
1674 return not s or s[0] not in ('---', 'diff')
1676 header = scanwhile(line, notheader)
1675 header = scanwhile(line, notheader)
1677 fromfile = lr.readline()
1676 fromfile = lr.readline()
1678 if fromfile.startswith('---'):
1677 if fromfile.startswith('---'):
1679 tofile = lr.readline()
1678 tofile = lr.readline()
1680 header += [fromfile, tofile]
1679 header += [fromfile, tofile]
1681 else:
1680 else:
1682 lr.push(fromfile)
1681 lr.push(fromfile)
1683 yield 'file', header
1682 yield 'file', header
1684 elif line[0] == ' ':
1683 elif line[0] == ' ':
1685 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1684 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1686 elif line[0] in '-+':
1685 elif line[0] in '-+':
1687 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1686 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1688 else:
1687 else:
1689 m = lines_re.match(line)
1688 m = lines_re.match(line)
1690 if m:
1689 if m:
1691 yield 'range', m.groups()
1690 yield 'range', m.groups()
1692 else:
1691 else:
1693 yield 'other', line
1692 yield 'other', line
1694
1693
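# Editorial example (not part of the original file): for a minimal git-style
# diff such as
#
#   diff --git a/a b/a
#   --- a/a
#   +++ b/a
#   @@ -1,2 +1,2 @@
#    context
#   -old
#   +new
#
# scanpatch() yields, in order:
#
#   ('file',    [the three header lines])
#   ('range',   ('1', '2', '1', '2', ''))
#   ('context', [' context\n'])
#   ('hunk',    ['-old\n', '+new\n'])
#
# These events drive the parser state machine in parsepatch() above.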
1695 def scangitpatch(lr, firstline):
1694 def scangitpatch(lr, firstline):
1696 """
1695 """
1697 Git patches can emit:
1696 Git patches can emit:
1698 - rename a to b
1697 - rename a to b
1699 - change b
1698 - change b
1700 - copy a to c
1699 - copy a to c
1701 - change c
1700 - change c
1702
1701
1703 We cannot apply this sequence as-is: the renamed 'a' could not be
1702 We cannot apply this sequence as-is: the renamed 'a' could not be
1704 found, as it would have been renamed already. And we cannot copy
1703 found, as it would have been renamed already. And we cannot copy
1705 from 'b' instead because 'b' would have been changed already. So
1704 from 'b' instead because 'b' would have been changed already. So
1706 we scan the git patch for copy and rename commands so we can
1705 we scan the git patch for copy and rename commands so we can
1707 perform the copies ahead of time.
1706 perform the copies ahead of time.
1708 """
1707 """
1709 pos = 0
1708 pos = 0
1710 try:
1709 try:
1711 pos = lr.fp.tell()
1710 pos = lr.fp.tell()
1712 fp = lr.fp
1711 fp = lr.fp
1713 except IOError:
1712 except IOError:
1714 fp = cStringIO.StringIO(lr.fp.read())
1713 fp = cStringIO.StringIO(lr.fp.read())
1715 gitlr = linereader(fp)
1714 gitlr = linereader(fp)
1716 gitlr.push(firstline)
1715 gitlr.push(firstline)
1717 gitpatches = readgitpatch(gitlr)
1716 gitpatches = readgitpatch(gitlr)
1718 fp.seek(pos)
1717 fp.seek(pos)
1719 return gitpatches
1718 return gitpatches
1720
1719
1721 def iterhunks(fp):
1720 def iterhunks(fp):
1722 """Read a patch and yield the following events:
1721 """Read a patch and yield the following events:
1723 - ("file", afile, bfile, firsthunk): select a new target file.
1722 - ("file", afile, bfile, firsthunk): select a new target file.
1724 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1723 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1725 "file" event.
1724 "file" event.
1726 - ("git", gitchanges): current diff is in git format, gitchanges
1725 - ("git", gitchanges): current diff is in git format, gitchanges
1727 maps filenames to gitpatch records. Unique event.
1726 maps filenames to gitpatch records. Unique event.
1728 """
1727 """
1729 afile = ""
1728 afile = ""
1730 bfile = ""
1729 bfile = ""
1731 state = None
1730 state = None
1732 hunknum = 0
1731 hunknum = 0
1733 emitfile = newfile = False
1732 emitfile = newfile = False
1734 gitpatches = None
1733 gitpatches = None
1735
1734
1736 # our states
1735 # our states
1737 BFILE = 1
1736 BFILE = 1
1738 context = None
1737 context = None
1739 lr = linereader(fp)
1738 lr = linereader(fp)
1740
1739
1741 while True:
1740 while True:
1742 x = lr.readline()
1741 x = lr.readline()
1743 if not x:
1742 if not x:
1744 break
1743 break
1745 if state == BFILE and (
1744 if state == BFILE and (
1746 (not context and x[0] == '@')
1745 (not context and x[0] == '@')
1747 or (context is not False and x.startswith('***************'))
1746 or (context is not False and x.startswith('***************'))
1748 or x.startswith('GIT binary patch')):
1747 or x.startswith('GIT binary patch')):
1749 gp = None
1748 gp = None
1750 if (gitpatches and
1749 if (gitpatches and
1751 gitpatches[-1].ispatching(afile, bfile)):
1750 gitpatches[-1].ispatching(afile, bfile)):
1752 gp = gitpatches.pop()
1751 gp = gitpatches.pop()
1753 if x.startswith('GIT binary patch'):
1752 if x.startswith('GIT binary patch'):
1754 h = binhunk(lr, gp.path)
1753 h = binhunk(lr, gp.path)
1755 else:
1754 else:
1756 if context is None and x.startswith('***************'):
1755 if context is None and x.startswith('***************'):
1757 context = True
1756 context = True
1758 h = hunk(x, hunknum + 1, lr, context)
1757 h = hunk(x, hunknum + 1, lr, context)
1759 hunknum += 1
1758 hunknum += 1
1760 if emitfile:
1759 if emitfile:
1761 emitfile = False
1760 emitfile = False
1762 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1761 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1763 yield 'hunk', h
1762 yield 'hunk', h
1764 elif x.startswith('diff --git a/'):
1763 elif x.startswith('diff --git a/'):
1765 m = gitre.match(x.rstrip(' \r\n'))
1764 m = gitre.match(x.rstrip(' \r\n'))
1766 if not m:
1765 if not m:
1767 continue
1766 continue
1768 if gitpatches is None:
1767 if gitpatches is None:
1769 # scan whole input for git metadata
1768 # scan whole input for git metadata
1770 gitpatches = scangitpatch(lr, x)
1769 gitpatches = scangitpatch(lr, x)
1771 yield 'git', [g.copy() for g in gitpatches
1770 yield 'git', [g.copy() for g in gitpatches
1772 if g.op in ('COPY', 'RENAME')]
1771 if g.op in ('COPY', 'RENAME')]
1773 gitpatches.reverse()
1772 gitpatches.reverse()
1774 afile = 'a/' + m.group(1)
1773 afile = 'a/' + m.group(1)
1775 bfile = 'b/' + m.group(2)
1774 bfile = 'b/' + m.group(2)
1776 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1775 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1777 gp = gitpatches.pop()
1776 gp = gitpatches.pop()
1778 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1777 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1779 if not gitpatches:
1778 if not gitpatches:
1780 raise PatchError(_('failed to synchronize metadata for "%s"')
1779 raise PatchError(_('failed to synchronize metadata for "%s"')
1781 % afile[2:])
1780 % afile[2:])
1782 gp = gitpatches[-1]
1781 gp = gitpatches[-1]
1783 newfile = True
1782 newfile = True
1784 elif x.startswith('---'):
1783 elif x.startswith('---'):
1785 # check for a unified diff
1784 # check for a unified diff
1786 l2 = lr.readline()
1785 l2 = lr.readline()
1787 if not l2.startswith('+++'):
1786 if not l2.startswith('+++'):
1788 lr.push(l2)
1787 lr.push(l2)
1789 continue
1788 continue
1790 newfile = True
1789 newfile = True
1791 context = False
1790 context = False
1792 afile = parsefilename(x)
1791 afile = parsefilename(x)
1793 bfile = parsefilename(l2)
1792 bfile = parsefilename(l2)
1794 elif x.startswith('***'):
1793 elif x.startswith('***'):
1795 # check for a context diff
1794 # check for a context diff
1796 l2 = lr.readline()
1795 l2 = lr.readline()
1797 if not l2.startswith('---'):
1796 if not l2.startswith('---'):
1798 lr.push(l2)
1797 lr.push(l2)
1799 continue
1798 continue
1800 l3 = lr.readline()
1799 l3 = lr.readline()
1801 lr.push(l3)
1800 lr.push(l3)
1802 if not l3.startswith("***************"):
1801 if not l3.startswith("***************"):
1803 lr.push(l2)
1802 lr.push(l2)
1804 continue
1803 continue
1805 newfile = True
1804 newfile = True
1806 context = True
1805 context = True
1807 afile = parsefilename(x)
1806 afile = parsefilename(x)
1808 bfile = parsefilename(l2)
1807 bfile = parsefilename(l2)
1809
1808
1810 if newfile:
1809 if newfile:
1811 newfile = False
1810 newfile = False
1812 emitfile = True
1811 emitfile = True
1813 state = BFILE
1812 state = BFILE
1814 hunknum = 0
1813 hunknum = 0
1815
1814
1816 while gitpatches:
1815 while gitpatches:
1817 gp = gitpatches.pop()
1816 gp = gitpatches.pop()
1818 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1817 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1819
1818
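# Editorial sketch (not part of the original file): for a git patch that
# renames 'a' to 'b' and then modifies 'b', iterhunks() first emits a single
# ('git', [...]) event carrying the COPY/RENAME patchmeta records (so callers
# can stash the sources away before any file is touched, cf. scangitpatch
# above), followed by the usual per-file stream:
#
#   ('file', ('a/b', 'b/b', <first hunk>, <patchmeta>))
#   ('hunk', <hunk>)
#   ...
#
# Files that carry git metadata but no hunks (pure renames, copies or mode
# changes) are still announced with a ('file', ...) event whose hunk slot is
# None.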
1820 def applybindelta(binchunk, data):
1819 def applybindelta(binchunk, data):
1821 """Apply a binary delta hunk
1820 """Apply a binary delta hunk
1822 The algorithm used is the one from git's patch-delta.c.
1821 The algorithm used is the one from git's patch-delta.c.
1823 """
1822 """
1824 def deltahead(binchunk):
1823 def deltahead(binchunk):
1825 i = 0
1824 i = 0
1826 for c in binchunk:
1825 for c in binchunk:
1827 i += 1
1826 i += 1
1828 if not (ord(c) & 0x80):
1827 if not (ord(c) & 0x80):
1829 return i
1828 return i
1830 return i
1829 return i
1831 out = ""
1830 out = ""
1832 s = deltahead(binchunk)
1831 s = deltahead(binchunk)
1833 binchunk = binchunk[s:]
1832 binchunk = binchunk[s:]
1834 s = deltahead(binchunk)
1833 s = deltahead(binchunk)
1835 binchunk = binchunk[s:]
1834 binchunk = binchunk[s:]
1836 i = 0
1835 i = 0
1837 while i < len(binchunk):
1836 while i < len(binchunk):
1838 cmd = ord(binchunk[i])
1837 cmd = ord(binchunk[i])
1839 i += 1
1838 i += 1
1840 if (cmd & 0x80):
1839 if (cmd & 0x80):
1841 offset = 0
1840 offset = 0
1842 size = 0
1841 size = 0
1843 if (cmd & 0x01):
1842 if (cmd & 0x01):
1844 offset = ord(binchunk[i])
1843 offset = ord(binchunk[i])
1845 i += 1
1844 i += 1
1846 if (cmd & 0x02):
1845 if (cmd & 0x02):
1847 offset |= ord(binchunk[i]) << 8
1846 offset |= ord(binchunk[i]) << 8
1848 i += 1
1847 i += 1
1849 if (cmd & 0x04):
1848 if (cmd & 0x04):
1850 offset |= ord(binchunk[i]) << 16
1849 offset |= ord(binchunk[i]) << 16
1851 i += 1
1850 i += 1
1852 if (cmd & 0x08):
1851 if (cmd & 0x08):
1853 offset |= ord(binchunk[i]) << 24
1852 offset |= ord(binchunk[i]) << 24
1854 i += 1
1853 i += 1
1855 if (cmd & 0x10):
1854 if (cmd & 0x10):
1856 size = ord(binchunk[i])
1855 size = ord(binchunk[i])
1857 i += 1
1856 i += 1
1858 if (cmd & 0x20):
1857 if (cmd & 0x20):
1859 size |= ord(binchunk[i]) << 8
1858 size |= ord(binchunk[i]) << 8
1860 i += 1
1859 i += 1
1861 if (cmd & 0x40):
1860 if (cmd & 0x40):
1862 size |= ord(binchunk[i]) << 16
1861 size |= ord(binchunk[i]) << 16
1863 i += 1
1862 i += 1
1864 if size == 0:
1863 if size == 0:
1865 size = 0x10000
1864 size = 0x10000
1866 offset_end = offset + size
1865 offset_end = offset + size
1867 out += data[offset:offset_end]
1866 out += data[offset:offset_end]
1868 elif cmd != 0:
1867 elif cmd != 0:
1869 offset_end = i + cmd
1868 offset_end = i + cmd
1870 out += binchunk[i:offset_end]
1869 out += binchunk[i:offset_end]
1871 i += cmd
1870 i += cmd
1872 else:
1871 else:
1873 raise PatchError(_('unexpected delta opcode 0'))
1872 raise PatchError(_('unexpected delta opcode 0'))
1874 return out
1873 return out
1875
1874
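# Editorial worked example (not part of the original file), following the
# opcode layout parsed above: two length headers, then one "copy from source"
# command whose flag byte is 0x80 | 0x01 | 0x10 (copy, 1-byte offset, 1-byte
# size):
#
#   src   = 'hello worldXXXX'              # 15 bytes of source data
#   delta = '\x0f\x0b' + '\x91\x00\x0b'    # lengths 15/11, copy(offset=0, size=11)
#   applybindelta(delta, src)              # -> 'hello world'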
1876 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1875 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1877 """Reads a patch from fp and tries to apply it.
1876 """Reads a patch from fp and tries to apply it.
1878
1877
1879 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1878 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1880 there was any fuzz.
1879 there was any fuzz.
1881
1880
1882 If 'eolmode' is 'strict', the patch content and patched file are
1881 If 'eolmode' is 'strict', the patch content and patched file are
1883 read in binary mode. Otherwise, line endings are ignored when
1882 read in binary mode. Otherwise, line endings are ignored when
1884 patching then normalized according to 'eolmode'.
1883 patching then normalized according to 'eolmode'.
1885 """
1884 """
1886 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1885 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1887 prefix=prefix, eolmode=eolmode)
1886 prefix=prefix, eolmode=eolmode)
1888
1887
1889 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1888 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1890 eolmode='strict'):
1889 eolmode='strict'):
1891
1890
1892 if prefix:
1891 if prefix:
1893 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1892 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1894 prefix)
1893 prefix)
1895 if prefix != '':
1894 if prefix != '':
1896 prefix += '/'
1895 prefix += '/'
1897 def pstrip(p):
1896 def pstrip(p):
1898 return pathtransform(p, strip - 1, prefix)[1]
1897 return pathtransform(p, strip - 1, prefix)[1]
1899
1898
1900 rejects = 0
1899 rejects = 0
1901 err = 0
1900 err = 0
1902 current_file = None
1901 current_file = None
1903
1902
1904 for state, values in iterhunks(fp):
1903 for state, values in iterhunks(fp):
1905 if state == 'hunk':
1904 if state == 'hunk':
1906 if not current_file:
1905 if not current_file:
1907 continue
1906 continue
1908 ret = current_file.apply(values)
1907 ret = current_file.apply(values)
1909 if ret > 0:
1908 if ret > 0:
1910 err = 1
1909 err = 1
1911 elif state == 'file':
1910 elif state == 'file':
1912 if current_file:
1911 if current_file:
1913 rejects += current_file.close()
1912 rejects += current_file.close()
1914 current_file = None
1913 current_file = None
1915 afile, bfile, first_hunk, gp = values
1914 afile, bfile, first_hunk, gp = values
1916 if gp:
1915 if gp:
1917 gp.path = pstrip(gp.path)
1916 gp.path = pstrip(gp.path)
1918 if gp.oldpath:
1917 if gp.oldpath:
1919 gp.oldpath = pstrip(gp.oldpath)
1918 gp.oldpath = pstrip(gp.oldpath)
1920 else:
1919 else:
1921 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1920 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1922 prefix)
1921 prefix)
1923 if gp.op == 'RENAME':
1922 if gp.op == 'RENAME':
1924 backend.unlink(gp.oldpath)
1923 backend.unlink(gp.oldpath)
1925 if not first_hunk:
1924 if not first_hunk:
1926 if gp.op == 'DELETE':
1925 if gp.op == 'DELETE':
1927 backend.unlink(gp.path)
1926 backend.unlink(gp.path)
1928 continue
1927 continue
1929 data, mode = None, None
1928 data, mode = None, None
1930 if gp.op in ('RENAME', 'COPY'):
1929 if gp.op in ('RENAME', 'COPY'):
1931 data, mode = store.getfile(gp.oldpath)[:2]
1930 data, mode = store.getfile(gp.oldpath)[:2]
1932 # FIXME: failing getfile has never been handled here
1931 # FIXME: failing getfile has never been handled here
1933 assert data is not None
1932 assert data is not None
1934 if gp.mode:
1933 if gp.mode:
1935 mode = gp.mode
1934 mode = gp.mode
1936 if gp.op == 'ADD':
1935 if gp.op == 'ADD':
1937 # Added files without content have no hunk and
1936 # Added files without content have no hunk and
1938 # must be created
1937 # must be created
1939 data = ''
1938 data = ''
1940 if data or mode:
1939 if data or mode:
1941 if (gp.op in ('ADD', 'RENAME', 'COPY')
1940 if (gp.op in ('ADD', 'RENAME', 'COPY')
1942 and backend.exists(gp.path)):
1941 and backend.exists(gp.path)):
1943 raise PatchError(_("cannot create %s: destination "
1942 raise PatchError(_("cannot create %s: destination "
1944 "already exists") % gp.path)
1943 "already exists") % gp.path)
1945 backend.setfile(gp.path, data, mode, gp.oldpath)
1944 backend.setfile(gp.path, data, mode, gp.oldpath)
1946 continue
1945 continue
1947 try:
1946 try:
1948 current_file = patcher(ui, gp, backend, store,
1947 current_file = patcher(ui, gp, backend, store,
1949 eolmode=eolmode)
1948 eolmode=eolmode)
1950 except PatchError as inst:
1949 except PatchError as inst:
1951 ui.warn(str(inst) + '\n')
1950 ui.warn(str(inst) + '\n')
1952 current_file = None
1951 current_file = None
1953 rejects += 1
1952 rejects += 1
1954 continue
1953 continue
1955 elif state == 'git':
1954 elif state == 'git':
1956 for gp in values:
1955 for gp in values:
1957 path = pstrip(gp.oldpath)
1956 path = pstrip(gp.oldpath)
1958 data, mode = backend.getfile(path)
1957 data, mode = backend.getfile(path)
1959 if data is None:
1958 if data is None:
1960 # The error ignored here will trigger a getfile()
1959 # The error ignored here will trigger a getfile()
1961 # error in a place more appropriate for error
1960 # error in a place more appropriate for error
1962 # handling, and will not interrupt the patching
1961 # handling, and will not interrupt the patching
1963 # process.
1962 # process.
1964 pass
1963 pass
1965 else:
1964 else:
1966 store.setfile(path, data, mode)
1965 store.setfile(path, data, mode)
1967 else:
1966 else:
1968 raise util.Abort(_('unsupported parser state: %s') % state)
1967 raise util.Abort(_('unsupported parser state: %s') % state)
1969
1968
1970 if current_file:
1969 if current_file:
1971 rejects += current_file.close()
1970 rejects += current_file.close()
1972
1971
1973 if rejects:
1972 if rejects:
1974 return -1
1973 return -1
1975 return err
1974 return err
1976
1975
1977 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1976 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1978 similarity):
1977 similarity):
1979 """use <patcher> to apply <patchname> to the working directory.
1978 """use <patcher> to apply <patchname> to the working directory.
1980 returns whether patch was applied with fuzz factor."""
1979 returns whether patch was applied with fuzz factor."""
1981
1980
1982 fuzz = False
1981 fuzz = False
1983 args = []
1982 args = []
1984 cwd = repo.root
1983 cwd = repo.root
1985 if cwd:
1984 if cwd:
1986 args.append('-d %s' % util.shellquote(cwd))
1985 args.append('-d %s' % util.shellquote(cwd))
1987 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1986 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1988 util.shellquote(patchname)))
1987 util.shellquote(patchname)))
1989 try:
1988 try:
1990 for line in fp:
1989 for line in fp:
1991 line = line.rstrip()
1990 line = line.rstrip()
1992 ui.note(line + '\n')
1991 ui.note(line + '\n')
1993 if line.startswith('patching file '):
1992 if line.startswith('patching file '):
1994 pf = util.parsepatchoutput(line)
1993 pf = util.parsepatchoutput(line)
1995 printed_file = False
1994 printed_file = False
1996 files.add(pf)
1995 files.add(pf)
1997 elif line.find('with fuzz') >= 0:
1996 elif line.find('with fuzz') >= 0:
1998 fuzz = True
1997 fuzz = True
1999 if not printed_file:
1998 if not printed_file:
2000 ui.warn(pf + '\n')
1999 ui.warn(pf + '\n')
2001 printed_file = True
2000 printed_file = True
2002 ui.warn(line + '\n')
2001 ui.warn(line + '\n')
2003 elif line.find('saving rejects to file') >= 0:
2002 elif line.find('saving rejects to file') >= 0:
2004 ui.warn(line + '\n')
2003 ui.warn(line + '\n')
2005 elif line.find('FAILED') >= 0:
2004 elif line.find('FAILED') >= 0:
2006 if not printed_file:
2005 if not printed_file:
2007 ui.warn(pf + '\n')
2006 ui.warn(pf + '\n')
2008 printed_file = True
2007 printed_file = True
2009 ui.warn(line + '\n')
2008 ui.warn(line + '\n')
2010 finally:
2009 finally:
2011 if files:
2010 if files:
2012 scmutil.marktouched(repo, files, similarity)
2011 scmutil.marktouched(repo, files, similarity)
2013 code = fp.close()
2012 code = fp.close()
2014 if code:
2013 if code:
2015 raise PatchError(_("patch command failed: %s") %
2014 raise PatchError(_("patch command failed: %s") %
2016 util.explainexit(code)[0])
2015 util.explainexit(code)[0])
2017 return fuzz
2016 return fuzz
2018
2017
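# Illustrative note (editor's sketch): the loop above scans output in the
# style produced by an external patch(1) program, e.g. lines resembling
#
#   patching file foo/bar.c
#   Hunk #2 succeeded at 87 with fuzz 1.
#   Hunk #3 FAILED at 120.
#
# "patching file" lines record touched files, "with fuzz" flips the return
# value, and fuzz/FAILED lines are warned with the affected filename printed
# once before them; "saving rejects to file" lines are echoed verbatim.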
2019 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2018 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2020 eolmode='strict'):
2019 eolmode='strict'):
2021 if files is None:
2020 if files is None:
2022 files = set()
2021 files = set()
2023 if eolmode is None:
2022 if eolmode is None:
2024 eolmode = ui.config('patch', 'eol', 'strict')
2023 eolmode = ui.config('patch', 'eol', 'strict')
2025 if eolmode.lower() not in eolmodes:
2024 if eolmode.lower() not in eolmodes:
2026 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
2025 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
2027 eolmode = eolmode.lower()
2026 eolmode = eolmode.lower()
2028
2027
2029 store = filestore()
2028 store = filestore()
2030 try:
2029 try:
2031 fp = open(patchobj, 'rb')
2030 fp = open(patchobj, 'rb')
2032 except TypeError:
2031 except TypeError:
2033 fp = patchobj
2032 fp = patchobj
2034 try:
2033 try:
2035 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2034 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2036 eolmode=eolmode)
2035 eolmode=eolmode)
2037 finally:
2036 finally:
2038 if fp != patchobj:
2037 if fp != patchobj:
2039 fp.close()
2038 fp.close()
2040 files.update(backend.close())
2039 files.update(backend.close())
2041 store.close()
2040 store.close()
2042 if ret < 0:
2041 if ret < 0:
2043 raise PatchError(_('patch failed to apply'))
2042 raise PatchError(_('patch failed to apply'))
2044 return ret > 0
2043 return ret > 0
2045
2044
2046 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2045 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2047 eolmode='strict', similarity=0):
2046 eolmode='strict', similarity=0):
2048 """use builtin patch to apply <patchobj> to the working directory.
2047 """use builtin patch to apply <patchobj> to the working directory.
2049 returns whether patch was applied with fuzz factor."""
2048 returns whether patch was applied with fuzz factor."""
2050 backend = workingbackend(ui, repo, similarity)
2049 backend = workingbackend(ui, repo, similarity)
2051 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2050 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2052
2051
2053 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2052 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2054 eolmode='strict'):
2053 eolmode='strict'):
2055 backend = repobackend(ui, repo, ctx, store)
2054 backend = repobackend(ui, repo, ctx, store)
2056 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2055 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2057
2056
2058 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2057 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2059 similarity=0):
2058 similarity=0):
2060 """Apply <patchname> to the working directory.
2059 """Apply <patchname> to the working directory.
2061
2060
2062 'eolmode' specifies how end of lines should be handled. It can be:
2061 'eolmode' specifies how end of lines should be handled. It can be:
2063 - 'strict': inputs are read in binary mode, EOLs are preserved
2062 - 'strict': inputs are read in binary mode, EOLs are preserved
2064 - 'crlf': EOLs are ignored when patching and reset to CRLF
2063 - 'crlf': EOLs are ignored when patching and reset to CRLF
2065 - 'lf': EOLs are ignored when patching and reset to LF
2064 - 'lf': EOLs are ignored when patching and reset to LF
2066 - None: get it from user settings, default to 'strict'
2065 - None: get it from user settings, default to 'strict'
2067 'eolmode' is ignored when using an external patcher program.
2066 'eolmode' is ignored when using an external patcher program.
2068
2067
2069 Returns whether patch was applied with fuzz factor.
2068 Returns whether patch was applied with fuzz factor.
2070 """
2069 """
2071 patcher = ui.config('ui', 'patch')
2070 patcher = ui.config('ui', 'patch')
2072 if files is None:
2071 if files is None:
2073 files = set()
2072 files = set()
2074 if patcher:
2073 if patcher:
2075 return _externalpatch(ui, repo, patcher, patchname, strip,
2074 return _externalpatch(ui, repo, patcher, patchname, strip,
2076 files, similarity)
2075 files, similarity)
2077 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2076 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2078 similarity)
2077 similarity)
2079
2078
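# Usage sketch (editor's illustration; the helper name and patch file are
# hypothetical, ui/repo come from Mercurial's command machinery): apply a
# patch to the working directory via the patch() entry point defined above.
def _examplepatch(ui, repo):
    # strip one leading path component and take the EOL policy from the
    # user's [patch] eol setting (eolmode=None)
    touched = set()
    fuzz = patch(ui, repo, 'fix.patch', strip=1, files=touched, eolmode=None)
    if fuzz:
        ui.warn('patch applied with fuzz\n')
    return touched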
2080 def changedfiles(ui, repo, patchpath, strip=1):
2079 def changedfiles(ui, repo, patchpath, strip=1):
2081 backend = fsbackend(ui, repo.root)
2080 backend = fsbackend(ui, repo.root)
2082 fp = open(patchpath, 'rb')
2081 fp = open(patchpath, 'rb')
2083 try:
2082 try:
2084 changed = set()
2083 changed = set()
2085 for state, values in iterhunks(fp):
2084 for state, values in iterhunks(fp):
2086 if state == 'file':
2085 if state == 'file':
2087 afile, bfile, first_hunk, gp = values
2086 afile, bfile, first_hunk, gp = values
2088 if gp:
2087 if gp:
2089 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2088 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2090 if gp.oldpath:
2089 if gp.oldpath:
2091 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2090 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2092 else:
2091 else:
2093 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2092 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2094 '')
2093 '')
2095 changed.add(gp.path)
2094 changed.add(gp.path)
2096 if gp.op == 'RENAME':
2095 if gp.op == 'RENAME':
2097 changed.add(gp.oldpath)
2096 changed.add(gp.oldpath)
2098 elif state not in ('hunk', 'git'):
2097 elif state not in ('hunk', 'git'):
2099 raise util.Abort(_('unsupported parser state: %s') % state)
2098 raise util.Abort(_('unsupported parser state: %s') % state)
2100 return changed
2099 return changed
2101 finally:
2100 finally:
2102 fp.close()
2101 fp.close()
2103
2102
2104 class GitDiffRequired(Exception):
2103 class GitDiffRequired(Exception):
2105 pass
2104 pass
2106
2105
2107 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2106 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2108 '''return diffopts with all features supported and parsed'''
2107 '''return diffopts with all features supported and parsed'''
2109 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2108 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2110 git=True, whitespace=True, formatchanging=True)
2109 git=True, whitespace=True, formatchanging=True)
2111
2110
2112 diffopts = diffallopts
2111 diffopts = diffallopts
2113
2112
2114 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2113 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2115 whitespace=False, formatchanging=False):
2114 whitespace=False, formatchanging=False):
2116 '''return diffopts with only opted-in features parsed
2115 '''return diffopts with only opted-in features parsed
2117
2116
2118 Features:
2117 Features:
2119 - git: git-style diffs
2118 - git: git-style diffs
2120 - whitespace: whitespace options like ignoreblanklines and ignorews
2119 - whitespace: whitespace options like ignoreblanklines and ignorews
2121 - formatchanging: options that will likely break or cause correctness issues
2120 - formatchanging: options that will likely break or cause correctness issues
2122 with most diff parsers
2121 with most diff parsers
2123 '''
2122 '''
2124 def get(key, name=None, getter=ui.configbool, forceplain=None):
2123 def get(key, name=None, getter=ui.configbool, forceplain=None):
2125 if opts:
2124 if opts:
2126 v = opts.get(key)
2125 v = opts.get(key)
2127 if v:
2126 if v:
2128 return v
2127 return v
2129 if forceplain is not None and ui.plain():
2128 if forceplain is not None and ui.plain():
2130 return forceplain
2129 return forceplain
2131 return getter(section, name or key, None, untrusted=untrusted)
2130 return getter(section, name or key, None, untrusted=untrusted)
2132
2131
2133 # core options, expected to be understood by every diff parser
2132 # core options, expected to be understood by every diff parser
2134 buildopts = {
2133 buildopts = {
2135 'nodates': get('nodates'),
2134 'nodates': get('nodates'),
2136 'showfunc': get('show_function', 'showfunc'),
2135 'showfunc': get('show_function', 'showfunc'),
2137 'context': get('unified', getter=ui.config),
2136 'context': get('unified', getter=ui.config),
2138 }
2137 }
2139
2138
2140 if git:
2139 if git:
2141 buildopts['git'] = get('git')
2140 buildopts['git'] = get('git')
2142 if whitespace:
2141 if whitespace:
2143 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2142 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2144 buildopts['ignorewsamount'] = get('ignore_space_change',
2143 buildopts['ignorewsamount'] = get('ignore_space_change',
2145 'ignorewsamount')
2144 'ignorewsamount')
2146 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2145 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2147 'ignoreblanklines')
2146 'ignoreblanklines')
2148 if formatchanging:
2147 if formatchanging:
2149 buildopts['text'] = opts and opts.get('text')
2148 buildopts['text'] = opts and opts.get('text')
2150 buildopts['nobinary'] = get('nobinary')
2149 buildopts['nobinary'] = get('nobinary')
2151 buildopts['noprefix'] = get('noprefix', forceplain=False)
2150 buildopts['noprefix'] = get('noprefix', forceplain=False)
2152
2151
2153 return mdiff.diffopts(**buildopts)
2152 return mdiff.diffopts(**buildopts)
2154
2153
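# Usage sketch (editor's illustration, hypothetical helper): build diffopts
# that honour git-style and whitespace settings from the [diff] section while
# leaving the format-changing options (nobinary, noprefix, ...) untouched.
def _examplediffopts(ui):
    return difffeatureopts(ui, git=True, whitespace=True)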
2155 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2154 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2156 losedatafn=None, prefix='', relroot=''):
2155 losedatafn=None, prefix='', relroot=''):
2157 '''yields diff of changes to files between two nodes, or node and
2156 '''yields diff of changes to files between two nodes, or node and
2158 working directory.
2157 working directory.
2159
2158
2160 if node1 is None, use first dirstate parent instead.
2159 if node1 is None, use first dirstate parent instead.
2161 if node2 is None, compare node1 with working directory.
2160 if node2 is None, compare node1 with working directory.
2162
2161
2163 losedatafn(**kwargs) is a callable run when opts.upgrade=True and
2162 losedatafn(**kwargs) is a callable run when opts.upgrade=True and
2164 every time some change cannot be represented with the current
2163 every time some change cannot be represented with the current
2165 patch format. Return False to upgrade to git patch format, True to
2164 patch format. Return False to upgrade to git patch format, True to
2166 accept the loss or raise an exception to abort the diff. It is
2165 accept the loss or raise an exception to abort the diff. It is
2167 called with the name of the current file being diffed as 'fn'. If set
2166 called with the name of the current file being diffed as 'fn'. If set
2168 to None, patches will always be upgraded to git format when
2167 to None, patches will always be upgraded to git format when
2169 necessary.
2168 necessary.
2170
2169
2171 prefix is a filename prefix that is prepended to all filenames on
2170 prefix is a filename prefix that is prepended to all filenames on
2172 display (used for subrepos).
2171 display (used for subrepos).
2173
2172
2174 relroot, if not empty, must be normalized with a trailing /. Any match
2173 relroot, if not empty, must be normalized with a trailing /. Any match
2175 patterns that fall outside it will be ignored.'''
2174 patterns that fall outside it will be ignored.'''
2176
2175
2177 if opts is None:
2176 if opts is None:
2178 opts = mdiff.defaultopts
2177 opts = mdiff.defaultopts
2179
2178
2180 if not node1 and not node2:
2179 if not node1 and not node2:
2181 node1 = repo.dirstate.p1()
2180 node1 = repo.dirstate.p1()
2182
2181
2183 def lrugetfilectx():
2182 def lrugetfilectx():
2184 cache = {}
2183 cache = {}
2185 order = collections.deque()
2184 order = collections.deque()
2186 def getfilectx(f, ctx):
2185 def getfilectx(f, ctx):
2187 fctx = ctx.filectx(f, filelog=cache.get(f))
2186 fctx = ctx.filectx(f, filelog=cache.get(f))
2188 if f not in cache:
2187 if f not in cache:
2189 if len(cache) > 20:
2188 if len(cache) > 20:
2190 del cache[order.popleft()]
2189 del cache[order.popleft()]
2191 cache[f] = fctx.filelog()
2190 cache[f] = fctx.filelog()
2192 else:
2191 else:
2193 order.remove(f)
2192 order.remove(f)
2194 order.append(f)
2193 order.append(f)
2195 return fctx
2194 return fctx
2196 return getfilectx
2195 return getfilectx
2197 getfilectx = lrugetfilectx()
2196 getfilectx = lrugetfilectx()
2198
2197
2199 ctx1 = repo[node1]
2198 ctx1 = repo[node1]
2200 ctx2 = repo[node2]
2199 ctx2 = repo[node2]
2201
2200
2202 relfiltered = False
2201 relfiltered = False
2203 if relroot != '' and match.always():
2202 if relroot != '' and match.always():
2204 # as a special case, create a new matcher with just the relroot
2203 # as a special case, create a new matcher with just the relroot
2205 pats = [relroot]
2204 pats = [relroot]
2206 match = scmutil.match(ctx2, pats, default='path')
2205 match = scmutil.match(ctx2, pats, default='path')
2207 relfiltered = True
2206 relfiltered = True
2208
2207
2209 if not changes:
2208 if not changes:
2210 changes = repo.status(ctx1, ctx2, match=match)
2209 changes = repo.status(ctx1, ctx2, match=match)
2211 modified, added, removed = changes[:3]
2210 modified, added, removed = changes[:3]
2212
2211
2213 if not modified and not added and not removed:
2212 if not modified and not added and not removed:
2214 return []
2213 return []
2215
2214
2216 if repo.ui.debugflag:
2215 if repo.ui.debugflag:
2217 hexfunc = hex
2216 hexfunc = hex
2218 else:
2217 else:
2219 hexfunc = short
2218 hexfunc = short
2220 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2219 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2221
2220
2222 copy = {}
2221 copy = {}
2223 if opts.git or opts.upgrade:
2222 if opts.git or opts.upgrade:
2224 copy = copies.pathcopies(ctx1, ctx2, match=match)
2223 copy = copies.pathcopies(ctx1, ctx2, match=match)
2225
2224
2226 if relroot is not None:
2225 if relroot is not None:
2227 if not relfiltered:
2226 if not relfiltered:
2228 # XXX this would ideally be done in the matcher, but that is
2227 # XXX this would ideally be done in the matcher, but that is
2229 # generally meant to 'or' patterns, not 'and' them. In this case we
2228 # generally meant to 'or' patterns, not 'and' them. In this case we
2230 # need to 'and' all the patterns from the matcher with relroot.
2229 # need to 'and' all the patterns from the matcher with relroot.
2231 def filterrel(l):
2230 def filterrel(l):
2232 return [f for f in l if f.startswith(relroot)]
2231 return [f for f in l if f.startswith(relroot)]
2233 modified = filterrel(modified)
2232 modified = filterrel(modified)
2234 added = filterrel(added)
2233 added = filterrel(added)
2235 removed = filterrel(removed)
2234 removed = filterrel(removed)
2236 relfiltered = True
2235 relfiltered = True
2237 # filter out copies where either side isn't inside the relative root
2236 # filter out copies where either side isn't inside the relative root
2238 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2237 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2239 if dst.startswith(relroot)
2238 if dst.startswith(relroot)
2240 and src.startswith(relroot)))
2239 and src.startswith(relroot)))
2241
2240
2242 def difffn(opts, losedata):
2241 def difffn(opts, losedata):
2243 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2242 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2244 copy, getfilectx, opts, losedata, prefix, relroot)
2243 copy, getfilectx, opts, losedata, prefix, relroot)
2245 if opts.upgrade and not opts.git:
2244 if opts.upgrade and not opts.git:
2246 try:
2245 try:
2247 def losedata(fn):
2246 def losedata(fn):
2248 if not losedatafn or not losedatafn(fn=fn):
2247 if not losedatafn or not losedatafn(fn=fn):
2249 raise GitDiffRequired
2248 raise GitDiffRequired
2250 # Buffer the whole output until we are sure it can be generated
2249 # Buffer the whole output until we are sure it can be generated
2251 return list(difffn(opts.copy(git=False), losedata))
2250 return list(difffn(opts.copy(git=False), losedata))
2252 except GitDiffRequired:
2251 except GitDiffRequired:
2253 return difffn(opts.copy(git=True), None)
2252 return difffn(opts.copy(git=True), None)
2254 else:
2253 else:
2255 return difffn(opts, None)
2254 return difffn(opts, None)
2256
2255
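# Usage sketch (editor's illustration, hypothetical helper): emit a git-style
# diff of the working directory against its first parent. Passing no matcher
# diffs everything; opts comes from the option builders defined above.
def _exampleshowdiff(ui, repo):
    opts = diffallopts(ui, {'git': True})
    for chunk in diff(repo, opts=opts):
        ui.write(chunk)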
2257 def difflabel(func, *args, **kw):
2256 def difflabel(func, *args, **kw):
2258 '''yields 2-tuples of (output, label) based on the output of func()'''
2257 '''yields 2-tuples of (output, label) based on the output of func()'''
2259 headprefixes = [('diff', 'diff.diffline'),
2258 headprefixes = [('diff', 'diff.diffline'),
2260 ('copy', 'diff.extended'),
2259 ('copy', 'diff.extended'),
2261 ('rename', 'diff.extended'),
2260 ('rename', 'diff.extended'),
2262 ('old', 'diff.extended'),
2261 ('old', 'diff.extended'),
2263 ('new', 'diff.extended'),
2262 ('new', 'diff.extended'),
2264 ('deleted', 'diff.extended'),
2263 ('deleted', 'diff.extended'),
2265 ('---', 'diff.file_a'),
2264 ('---', 'diff.file_a'),
2266 ('+++', 'diff.file_b')]
2265 ('+++', 'diff.file_b')]
2267 textprefixes = [('@', 'diff.hunk'),
2266 textprefixes = [('@', 'diff.hunk'),
2268 ('-', 'diff.deleted'),
2267 ('-', 'diff.deleted'),
2269 ('+', 'diff.inserted')]
2268 ('+', 'diff.inserted')]
2270 head = False
2269 head = False
2271 for chunk in func(*args, **kw):
2270 for chunk in func(*args, **kw):
2272 lines = chunk.split('\n')
2271 lines = chunk.split('\n')
2273 for i, line in enumerate(lines):
2272 for i, line in enumerate(lines):
2274 if i != 0:
2273 if i != 0:
2275 yield ('\n', '')
2274 yield ('\n', '')
2276 if head:
2275 if head:
2277 if line.startswith('@'):
2276 if line.startswith('@'):
2278 head = False
2277 head = False
2279 else:
2278 else:
2280 if line and line[0] not in ' +-@\\':
2279 if line and line[0] not in ' +-@\\':
2281 head = True
2280 head = True
2282 stripline = line
2281 stripline = line
2283 diffline = False
2282 diffline = False
2284 if not head and line and line[0] in '+-':
2283 if not head and line and line[0] in '+-':
2285 # highlight tabs and trailing whitespace, but only in
2284 # highlight tabs and trailing whitespace, but only in
2286 # changed lines
2285 # changed lines
2287 stripline = line.rstrip()
2286 stripline = line.rstrip()
2288 diffline = True
2287 diffline = True
2289
2288
2290 prefixes = textprefixes
2289 prefixes = textprefixes
2291 if head:
2290 if head:
2292 prefixes = headprefixes
2291 prefixes = headprefixes
2293 for prefix, label in prefixes:
2292 for prefix, label in prefixes:
2294 if stripline.startswith(prefix):
2293 if stripline.startswith(prefix):
2295 if diffline:
2294 if diffline:
2296 for token in tabsplitter.findall(stripline):
2295 for token in tabsplitter.findall(stripline):
2297 if '\t' == token[0]:
2296 if '\t' == token[0]:
2298 yield (token, 'diff.tab')
2297 yield (token, 'diff.tab')
2299 else:
2298 else:
2300 yield (token, label)
2299 yield (token, label)
2301 else:
2300 else:
2302 yield (stripline, label)
2301 yield (stripline, label)
2303 break
2302 break
2304 else:
2303 else:
2305 yield (line, '')
2304 yield (line, '')
2306 if line != stripline:
2305 if line != stripline:
2307 yield (line[len(stripline):], 'diff.trailingwhitespace')
2306 yield (line[len(stripline):], 'diff.trailingwhitespace')
2308
2307
2309 def diffui(*args, **kw):
2308 def diffui(*args, **kw):
2310 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2309 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2311 return difflabel(diff, *args, **kw)
2310 return difflabel(diff, *args, **kw)
2312
2311
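# Usage sketch (editor's illustration, hypothetical helper): diffui() wraps
# diff() through difflabel(), so each piece of output arrives with a label
# such as 'diff.hunk' or 'diff.inserted' that ui.write() can colourise.
def _examplecolordiff(ui, repo):
    for output, label in diffui(repo, opts=diffallopts(ui, {'git': True})):
        ui.write(output, label=label)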
2313 def _filepairs(ctx1, modified, added, removed, copy, opts):
2312 def _filepairs(ctx1, modified, added, removed, copy, opts):
2314 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2313 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2315 before and f2 is the name after. For added files, f1 will be None,
2314 before and f2 is the name after. For added files, f1 will be None,
2316 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2315 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2317 or 'rename' (the latter two only if opts.git is set).'''
2316 or 'rename' (the latter two only if opts.git is set).'''
2318 gone = set()
2317 gone = set()
2319
2318
2320 copyto = dict([(v, k) for k, v in copy.items()])
2319 copyto = dict([(v, k) for k, v in copy.items()])
2321
2320
2322 addedset, removedset = set(added), set(removed)
2321 addedset, removedset = set(added), set(removed)
2323 # Fix up added, since merged-in additions appear as
2322 # Fix up added, since merged-in additions appear as
2324 # modifications during merges
2323 # modifications during merges
2325 for f in modified:
2324 for f in modified:
2326 if f not in ctx1:
2325 if f not in ctx1:
2327 addedset.add(f)
2326 addedset.add(f)
2328
2327
2329 for f in sorted(modified + added + removed):
2328 for f in sorted(modified + added + removed):
2330 copyop = None
2329 copyop = None
2331 f1, f2 = f, f
2330 f1, f2 = f, f
2332 if f in addedset:
2331 if f in addedset:
2333 f1 = None
2332 f1 = None
2334 if f in copy:
2333 if f in copy:
2335 if opts.git:
2334 if opts.git:
2336 f1 = copy[f]
2335 f1 = copy[f]
2337 if f1 in removedset and f1 not in gone:
2336 if f1 in removedset and f1 not in gone:
2338 copyop = 'rename'
2337 copyop = 'rename'
2339 gone.add(f1)
2338 gone.add(f1)
2340 else:
2339 else:
2341 copyop = 'copy'
2340 copyop = 'copy'
2342 elif f in removedset:
2341 elif f in removedset:
2343 f2 = None
2342 f2 = None
2344 if opts.git:
2343 if opts.git:
2345 # have we already reported a copy above?
2344 # have we already reported a copy above?
2346 if (f in copyto and copyto[f] in addedset
2345 if (f in copyto and copyto[f] in addedset
2347 and copy[copyto[f]] == f):
2346 and copy[copyto[f]] == f):
2348 continue
2347 continue
2349 yield f1, f2, copyop
2348 yield f1, f2, copyop
2350
2349
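# Worked example (editor's illustration): with added=['b'], removed=['a'],
# modified=[], copy={'b': 'a'} and opts.git set, the generator above yields a
# single ('a', 'b', 'rename') tuple -- the removal of 'a' is suppressed
# because it was already reported as the rename source.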
2351 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2350 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2352 copy, getfilectx, opts, losedatafn, prefix, relroot):
2351 copy, getfilectx, opts, losedatafn, prefix, relroot):
2353 '''given input data, generate a diff and yield it in blocks
2352 '''given input data, generate a diff and yield it in blocks
2354
2353
2355 If generating a diff would lose data like flags or binary data and
2354 If generating a diff would lose data like flags or binary data and
2356 losedatafn is not None, it will be called.
2355 losedatafn is not None, it will be called.
2357
2356
2358 relroot is removed and prefix is added to every path in the diff output.
2357 relroot is removed and prefix is added to every path in the diff output.
2359
2358
2360 If relroot is not empty, this function expects every path in modified,
2359 If relroot is not empty, this function expects every path in modified,
2361 added, removed and copy to start with it.'''
2360 added, removed and copy to start with it.'''
2362
2361
2363 def gitindex(text):
2362 def gitindex(text):
2364 if not text:
2363 if not text:
2365 text = ""
2364 text = ""
2366 l = len(text)
2365 l = len(text)
2367 s = util.sha1('blob %d\0' % l)
2366 s = util.sha1('blob %d\0' % l)
2368 s.update(text)
2367 s.update(text)
2369 return s.hexdigest()
2368 return s.hexdigest()
2370
2369
2371 if opts.noprefix:
2370 if opts.noprefix:
2372 aprefix = bprefix = ''
2371 aprefix = bprefix = ''
2373 else:
2372 else:
2374 aprefix = 'a/'
2373 aprefix = 'a/'
2375 bprefix = 'b/'
2374 bprefix = 'b/'
2376
2375
2377 def diffline(f, revs):
2376 def diffline(f, revs):
2378 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2377 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2379 return 'diff %s %s' % (revinfo, f)
2378 return 'diff %s %s' % (revinfo, f)
2380
2379
2381 date1 = util.datestr(ctx1.date())
2380 date1 = util.datestr(ctx1.date())
2382 date2 = util.datestr(ctx2.date())
2381 date2 = util.datestr(ctx2.date())
2383
2382
2384 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2383 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2385
2384
2386 if relroot != '' and (repo.ui.configbool('devel', 'all')
2385 if relroot != '' and (repo.ui.configbool('devel', 'all')
2387 or repo.ui.configbool('devel', 'check-relroot')):
2386 or repo.ui.configbool('devel', 'check-relroot')):
2388 for f in modified + added + removed + copy.keys() + copy.values():
2387 for f in modified + added + removed + copy.keys() + copy.values():
2389 if f is not None and not f.startswith(relroot):
2388 if f is not None and not f.startswith(relroot):
2390 raise AssertionError(
2389 raise AssertionError(
2391 "file %s doesn't start with relroot %s" % (f, relroot))
2390 "file %s doesn't start with relroot %s" % (f, relroot))
2392
2391
2393 for f1, f2, copyop in _filepairs(
2392 for f1, f2, copyop in _filepairs(
2394 ctx1, modified, added, removed, copy, opts):
2393 ctx1, modified, added, removed, copy, opts):
2395 content1 = None
2394 content1 = None
2396 content2 = None
2395 content2 = None
2397 flag1 = None
2396 flag1 = None
2398 flag2 = None
2397 flag2 = None
2399 if f1:
2398 if f1:
2400 content1 = getfilectx(f1, ctx1).data()
2399 content1 = getfilectx(f1, ctx1).data()
2401 if opts.git or losedatafn:
2400 if opts.git or losedatafn:
2402 flag1 = ctx1.flags(f1)
2401 flag1 = ctx1.flags(f1)
2403 if f2:
2402 if f2:
2404 content2 = getfilectx(f2, ctx2).data()
2403 content2 = getfilectx(f2, ctx2).data()
2405 if opts.git or losedatafn:
2404 if opts.git or losedatafn:
2406 flag2 = ctx2.flags(f2)
2405 flag2 = ctx2.flags(f2)
2407 binary = False
2406 binary = False
2408 if opts.git or losedatafn:
2407 if opts.git or losedatafn:
2409 binary = util.binary(content1) or util.binary(content2)
2408 binary = util.binary(content1) or util.binary(content2)
2410
2409
2411 if losedatafn and not opts.git:
2410 if losedatafn and not opts.git:
2412 if (binary or
2411 if (binary or
2413 # copy/rename
2412 # copy/rename
2414 f2 in copy or
2413 f2 in copy or
2415 # empty file creation
2414 # empty file creation
2416 (not f1 and not content2) or
2415 (not f1 and not content2) or
2417 # empty file deletion
2416 # empty file deletion
2418 (not content1 and not f2) or
2417 (not content1 and not f2) or
2419 # create with flags
2418 # create with flags
2420 (not f1 and flag2) or
2419 (not f1 and flag2) or
2421 # change flags
2420 # change flags
2422 (f1 and f2 and flag1 != flag2)):
2421 (f1 and f2 and flag1 != flag2)):
2423 losedatafn(f2 or f1)
2422 losedatafn(f2 or f1)
2424
2423
2425 path1 = f1 or f2
2424 path1 = f1 or f2
2426 path2 = f2 or f1
2425 path2 = f2 or f1
2427 path1 = posixpath.join(prefix, path1[len(relroot):])
2426 path1 = posixpath.join(prefix, path1[len(relroot):])
2428 path2 = posixpath.join(prefix, path2[len(relroot):])
2427 path2 = posixpath.join(prefix, path2[len(relroot):])
2429 header = []
2428 header = []
2430 if opts.git:
2429 if opts.git:
2431 header.append('diff --git %s%s %s%s' %
2430 header.append('diff --git %s%s %s%s' %
2432 (aprefix, path1, bprefix, path2))
2431 (aprefix, path1, bprefix, path2))
2433 if not f1: # added
2432 if not f1: # added
2434 header.append('new file mode %s' % gitmode[flag2])
2433 header.append('new file mode %s' % gitmode[flag2])
2435 elif not f2: # removed
2434 elif not f2: # removed
2436 header.append('deleted file mode %s' % gitmode[flag1])
2435 header.append('deleted file mode %s' % gitmode[flag1])
2437 else: # modified/copied/renamed
2436 else: # modified/copied/renamed
2438 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2437 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2439 if mode1 != mode2:
2438 if mode1 != mode2:
2440 header.append('old mode %s' % mode1)
2439 header.append('old mode %s' % mode1)
2441 header.append('new mode %s' % mode2)
2440 header.append('new mode %s' % mode2)
2442 if copyop is not None:
2441 if copyop is not None:
2443 header.append('%s from %s' % (copyop, path1))
2442 header.append('%s from %s' % (copyop, path1))
2444 header.append('%s to %s' % (copyop, path2))
2443 header.append('%s to %s' % (copyop, path2))
2445 elif revs and not repo.ui.quiet:
2444 elif revs and not repo.ui.quiet:
2446 header.append(diffline(path1, revs))
2445 header.append(diffline(path1, revs))
2447
2446
2448 if binary and opts.git and not opts.nobinary:
2447 if binary and opts.git and not opts.nobinary:
2449 text = mdiff.b85diff(content1, content2)
2448 text = mdiff.b85diff(content1, content2)
2450 if text:
2449 if text:
2451 header.append('index %s..%s' %
2450 header.append('index %s..%s' %
2452 (gitindex(content1), gitindex(content2)))
2451 (gitindex(content1), gitindex(content2)))
2453 else:
2452 else:
2454 text = mdiff.unidiff(content1, date1,
2453 text = mdiff.unidiff(content1, date1,
2455 content2, date2,
2454 content2, date2,
2456 path1, path2, opts=opts)
2455 path1, path2, opts=opts)
2457 if header and (text or len(header) > 1):
2456 if header and (text or len(header) > 1):
2458 yield '\n'.join(header) + '\n'
2457 yield '\n'.join(header) + '\n'
2459 if text:
2458 if text:
2460 yield text
2459 yield text
2461
2460
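# Illustration (editor's sketch, not part of the original module): gitindex()
# above computes git's blob object id, i.e. sha1 over "blob <len>\0<content>".
# A standalone equivalent using only the standard library:
def _examplegitblobid(text):
    import hashlib
    s = hashlib.sha1('blob %d\0' % len(text))
    s.update(text)
    return s.hexdigest()
# _examplegitblobid('') == 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391', git's
# well-known empty-blob id, which is what the "index" header lines carry.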
2462 def diffstatsum(stats):
2461 def diffstatsum(stats):
2463 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2462 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2464 for f, a, r, b in stats:
2463 for f, a, r, b in stats:
2465 maxfile = max(maxfile, encoding.colwidth(f))
2464 maxfile = max(maxfile, encoding.colwidth(f))
2466 maxtotal = max(maxtotal, a + r)
2465 maxtotal = max(maxtotal, a + r)
2467 addtotal += a
2466 addtotal += a
2468 removetotal += r
2467 removetotal += r
2469 binary = binary or b
2468 binary = binary or b
2470
2469
2471 return maxfile, maxtotal, addtotal, removetotal, binary
2470 return maxfile, maxtotal, addtotal, removetotal, binary
2472
2471
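# Worked example (editor's illustration): diffstatsum() above folds per-file
# tuples into totals, e.g.
#   diffstatsum([('a.txt', 2, 1, False), ('b.bin', 0, 0, True)])
# returns (5, 3, 2, 1, True): widest filename, largest add+remove count,
# total additions, total removals, and whether any file was binary.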
2473 def diffstatdata(lines):
2472 def diffstatdata(lines):
2474 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2473 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2475
2474
2476 results = []
2475 results = []
2477 filename, adds, removes, isbinary = None, 0, 0, False
2476 filename, adds, removes, isbinary = None, 0, 0, False
2478
2477
2479 def addresult():
2478 def addresult():
2480 if filename:
2479 if filename:
2481 results.append((filename, adds, removes, isbinary))
2480 results.append((filename, adds, removes, isbinary))
2482
2481
2483 for line in lines:
2482 for line in lines:
2484 if line.startswith('diff'):
2483 if line.startswith('diff'):
2485 addresult()
2484 addresult()
2486 # set numbers to 0 anyway when starting new file
2485 # set numbers to 0 anyway when starting new file
2487 adds, removes, isbinary = 0, 0, False
2486 adds, removes, isbinary = 0, 0, False
2488 if line.startswith('diff --git a/'):
2487 if line.startswith('diff --git a/'):
2489 filename = gitre.search(line).group(2)
2488 filename = gitre.search(line).group(2)
2490 elif line.startswith('diff -r'):
2489 elif line.startswith('diff -r'):
2491 # format: "diff -r ... -r ... filename"
2490 # format: "diff -r ... -r ... filename"
2492 filename = diffre.search(line).group(1)
2491 filename = diffre.search(line).group(1)
2493 elif line.startswith('+') and not line.startswith('+++ '):
2492 elif line.startswith('+') and not line.startswith('+++ '):
2494 adds += 1
2493 adds += 1
2495 elif line.startswith('-') and not line.startswith('--- '):
2494 elif line.startswith('-') and not line.startswith('--- '):
2496 removes += 1
2495 removes += 1
2497 elif (line.startswith('GIT binary patch') or
2496 elif (line.startswith('GIT binary patch') or
2498 line.startswith('Binary file')):
2497 line.startswith('Binary file')):
2499 isbinary = True
2498 isbinary = True
2500 addresult()
2499 addresult()
2501 return results
2500 return results
2502
2501
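# Worked example (editor's illustration): fed the lines of a small git-style
# diff such as
#   diff --git a/foo.py b/foo.py
#   --- a/foo.py
#   +++ b/foo.py
#   @@ -1,1 +1,1 @@
#   -old
#   +new
# diffstatdata() above returns [('foo.py', 1, 1, False)]: one addition, one
# removal, not binary. The '---'/'+++' header lines are excluded from counts.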
2503 def diffstat(lines, width=80, git=False):
2502 def diffstat(lines, width=80, git=False):
2504 output = []
2503 output = []
2505 stats = diffstatdata(lines)
2504 stats = diffstatdata(lines)
2506 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2505 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2507
2506
2508 countwidth = len(str(maxtotal))
2507 countwidth = len(str(maxtotal))
2509 if hasbinary and countwidth < 3:
2508 if hasbinary and countwidth < 3:
2510 countwidth = 3
2509 countwidth = 3
2511 graphwidth = width - countwidth - maxname - 6
2510 graphwidth = width - countwidth - maxname - 6
2512 if graphwidth < 10:
2511 if graphwidth < 10:
2513 graphwidth = 10
2512 graphwidth = 10
2514
2513
2515 def scale(i):
2514 def scale(i):
2516 if maxtotal <= graphwidth:
2515 if maxtotal <= graphwidth:
2517 return i
2516 return i
2518 # If diffstat runs out of room it doesn't print anything,
2517 # If diffstat runs out of room it doesn't print anything,
2519 # which isn't very useful, so always print at least one + or -
2518 # which isn't very useful, so always print at least one + or -
2520 # if there were at least some changes.
2519 # if there were at least some changes.
2521 return max(i * graphwidth // maxtotal, int(bool(i)))
2520 return max(i * graphwidth // maxtotal, int(bool(i)))
2522
2521
2523 for filename, adds, removes, isbinary in stats:
2522 for filename, adds, removes, isbinary in stats:
2524 if isbinary:
2523 if isbinary:
2525 count = 'Bin'
2524 count = 'Bin'
2526 else:
2525 else:
2527 count = adds + removes
2526 count = adds + removes
2528 pluses = '+' * scale(adds)
2527 pluses = '+' * scale(adds)
2529 minuses = '-' * scale(removes)
2528 minuses = '-' * scale(removes)
2530 output.append(' %s%s | %*s %s%s\n' %
2529 output.append(' %s%s | %*s %s%s\n' %
2531 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2530 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2532 countwidth, count, pluses, minuses))
2531 countwidth, count, pluses, minuses))
2533
2532
2534 if stats:
2533 if stats:
2535 output.append(_(' %d files changed, %d insertions(+), '
2534 output.append(_(' %d files changed, %d insertions(+), '
2536 '%d deletions(-)\n')
2535 '%d deletions(-)\n')
2537 % (len(stats), totaladds, totalremoves))
2536 % (len(stats), totaladds, totalremoves))
2538
2537
2539 return ''.join(output)
2538 return ''.join(output)
2540
2539
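# Usage sketch (editor's illustration, hypothetical helper): render a
# diffstat for the working-directory diff produced by diff() above.
def _examplediffstat(ui, repo):
    lines = []
    for chunk in diff(repo, opts=diffallopts(ui)):
        lines.extend(chunk.splitlines(True))
    ui.write(diffstat(lines, width=60))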
2541 def diffstatui(*args, **kw):
2540 def diffstatui(*args, **kw):
2542 '''like diffstat(), but yields 2-tuples of (output, label) for
2541 '''like diffstat(), but yields 2-tuples of (output, label) for
2543 ui.write()
2542 ui.write()
2544 '''
2543 '''
2545
2544
2546 for line in diffstat(*args, **kw).splitlines():
2545 for line in diffstat(*args, **kw).splitlines():
2547 if line and line[-1] in '+-':
2546 if line and line[-1] in '+-':
2548 name, graph = line.rsplit(' ', 1)
2547 name, graph = line.rsplit(' ', 1)
2549 yield (name + ' ', '')
2548 yield (name + ' ', '')
2550 m = re.search(r'\++', graph)
2549 m = re.search(r'\++', graph)
2551 if m:
2550 if m:
2552 yield (m.group(0), 'diffstat.inserted')
2551 yield (m.group(0), 'diffstat.inserted')
2553 m = re.search(r'-+', graph)
2552 m = re.search(r'-+', graph)
2554 if m:
2553 if m:
2555 yield (m.group(0), 'diffstat.deleted')
2554 yield (m.group(0), 'diffstat.deleted')
2556 else:
2555 else:
2557 yield (line, '')
2556 yield (line, '')
2558 yield ('\n', '')
2557 yield ('\n', '')