extract: parse 'date' using the generic mechanism...
Pierre-Yves David
r26558:fe52cd04 default
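This changeset drops the hard-coded `# Date ` branch from extract() and instead registers the pair ('Date', 'date') in patchheadermap, so the date is picked up by the generic `# <Header> <value>` loop that already handles other extensible patch headers (the `elif line.startswith("# "):` branch visible in the diff below). The following standalone sketch illustrates that generic mechanism; the helper name and the sample header lines are illustrative only, not part of the patch.

# Minimal standalone sketch (not the real extract() code) of the generic
# "# <Header> <value>" parsing that 'date' now goes through.
patchheadermap = [('Date', 'date')]

def parseextraheaders(lines):
    # collect '# Header value' lines listed in patchheadermap into a dict
    data = {}
    for line in lines:
        for header, key in patchheadermap:
            prefix = '# %s ' % header
            if line.startswith(prefix):
                data[key] = line[len(prefix):]
    return data

# hypothetical sample of an 'hg export' style header
sample = [
    '# HG changeset patch',
    '# User Pierre-Yves David',
    '# Date 1443111790 -7200',
]
print(parseextraheaders(sample))    # -> {'date': '1443111790 -7200'}

In the real extract() this loop runs inside the hgpatchheader branch and yields the same data['date'] value as the removed explicit check (line[7:], since len('# Date ') == 7).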
@@ -1,2560 +1,2558 @@
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import collections
9 import collections
10 import cStringIO, email, os, errno, re, posixpath, copy
10 import cStringIO, email, os, errno, re, posixpath, copy
11 import tempfile, zlib, shutil
11 import tempfile, zlib, shutil
12
12
13 from i18n import _
13 from i18n import _
14 from node import hex, short
14 from node import hex, short
15 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
15 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
16 import pathutil
16 import pathutil
17
17
18 gitre = re.compile('diff --git a/(.*) b/(.*)')
18 gitre = re.compile('diff --git a/(.*) b/(.*)')
19 tabsplitter = re.compile(r'(\t+|[^\t]+)')
19 tabsplitter = re.compile(r'(\t+|[^\t]+)')
20
20
21 class PatchError(Exception):
21 class PatchError(Exception):
22 pass
22 pass
23
23
24
24
25 # public functions
25 # public functions
26
26
27 def split(stream):
27 def split(stream):
28 '''return an iterator of individual patches from a stream'''
28 '''return an iterator of individual patches from a stream'''
29 def isheader(line, inheader):
29 def isheader(line, inheader):
30 if inheader and line[0] in (' ', '\t'):
30 if inheader and line[0] in (' ', '\t'):
31 # continuation
31 # continuation
32 return True
32 return True
33 if line[0] in (' ', '-', '+'):
33 if line[0] in (' ', '-', '+'):
34 # diff line - don't check for header pattern in there
34 # diff line - don't check for header pattern in there
35 return False
35 return False
36 l = line.split(': ', 1)
36 l = line.split(': ', 1)
37 return len(l) == 2 and ' ' not in l[0]
37 return len(l) == 2 and ' ' not in l[0]
38
38
39 def chunk(lines):
39 def chunk(lines):
40 return cStringIO.StringIO(''.join(lines))
40 return cStringIO.StringIO(''.join(lines))
41
41
42 def hgsplit(stream, cur):
42 def hgsplit(stream, cur):
43 inheader = True
43 inheader = True
44
44
45 for line in stream:
45 for line in stream:
46 if not line.strip():
46 if not line.strip():
47 inheader = False
47 inheader = False
48 if not inheader and line.startswith('# HG changeset patch'):
48 if not inheader and line.startswith('# HG changeset patch'):
49 yield chunk(cur)
49 yield chunk(cur)
50 cur = []
50 cur = []
51 inheader = True
51 inheader = True
52
52
53 cur.append(line)
53 cur.append(line)
54
54
55 if cur:
55 if cur:
56 yield chunk(cur)
56 yield chunk(cur)
57
57
58 def mboxsplit(stream, cur):
58 def mboxsplit(stream, cur):
59 for line in stream:
59 for line in stream:
60 if line.startswith('From '):
60 if line.startswith('From '):
61 for c in split(chunk(cur[1:])):
61 for c in split(chunk(cur[1:])):
62 yield c
62 yield c
63 cur = []
63 cur = []
64
64
65 cur.append(line)
65 cur.append(line)
66
66
67 if cur:
67 if cur:
68 for c in split(chunk(cur[1:])):
68 for c in split(chunk(cur[1:])):
69 yield c
69 yield c
70
70
71 def mimesplit(stream, cur):
71 def mimesplit(stream, cur):
72 def msgfp(m):
72 def msgfp(m):
73 fp = cStringIO.StringIO()
73 fp = cStringIO.StringIO()
74 g = email.Generator.Generator(fp, mangle_from_=False)
74 g = email.Generator.Generator(fp, mangle_from_=False)
75 g.flatten(m)
75 g.flatten(m)
76 fp.seek(0)
76 fp.seek(0)
77 return fp
77 return fp
78
78
79 for line in stream:
79 for line in stream:
80 cur.append(line)
80 cur.append(line)
81 c = chunk(cur)
81 c = chunk(cur)
82
82
83 m = email.Parser.Parser().parse(c)
83 m = email.Parser.Parser().parse(c)
84 if not m.is_multipart():
84 if not m.is_multipart():
85 yield msgfp(m)
85 yield msgfp(m)
86 else:
86 else:
87 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
87 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
88 for part in m.walk():
88 for part in m.walk():
89 ct = part.get_content_type()
89 ct = part.get_content_type()
90 if ct not in ok_types:
90 if ct not in ok_types:
91 continue
91 continue
92 yield msgfp(part)
92 yield msgfp(part)
93
93
94 def headersplit(stream, cur):
94 def headersplit(stream, cur):
95 inheader = False
95 inheader = False
96
96
97 for line in stream:
97 for line in stream:
98 if not inheader and isheader(line, inheader):
98 if not inheader and isheader(line, inheader):
99 yield chunk(cur)
99 yield chunk(cur)
100 cur = []
100 cur = []
101 inheader = True
101 inheader = True
102 if inheader and not isheader(line, inheader):
102 if inheader and not isheader(line, inheader):
103 inheader = False
103 inheader = False
104
104
105 cur.append(line)
105 cur.append(line)
106
106
107 if cur:
107 if cur:
108 yield chunk(cur)
108 yield chunk(cur)
109
109
110 def remainder(cur):
110 def remainder(cur):
111 yield chunk(cur)
111 yield chunk(cur)
112
112
113 class fiter(object):
113 class fiter(object):
114 def __init__(self, fp):
114 def __init__(self, fp):
115 self.fp = fp
115 self.fp = fp
116
116
117 def __iter__(self):
117 def __iter__(self):
118 return self
118 return self
119
119
120 def next(self):
120 def next(self):
121 l = self.fp.readline()
121 l = self.fp.readline()
122 if not l:
122 if not l:
123 raise StopIteration
123 raise StopIteration
124 return l
124 return l
125
125
126 inheader = False
126 inheader = False
127 cur = []
127 cur = []
128
128
129 mimeheaders = ['content-type']
129 mimeheaders = ['content-type']
130
130
131 if not util.safehasattr(stream, 'next'):
131 if not util.safehasattr(stream, 'next'):
132 # http responses, for example, have readline but not next
132 # http responses, for example, have readline but not next
133 stream = fiter(stream)
133 stream = fiter(stream)
134
134
135 for line in stream:
135 for line in stream:
136 cur.append(line)
136 cur.append(line)
137 if line.startswith('# HG changeset patch'):
137 if line.startswith('# HG changeset patch'):
138 return hgsplit(stream, cur)
138 return hgsplit(stream, cur)
139 elif line.startswith('From '):
139 elif line.startswith('From '):
140 return mboxsplit(stream, cur)
140 return mboxsplit(stream, cur)
141 elif isheader(line, inheader):
141 elif isheader(line, inheader):
142 inheader = True
142 inheader = True
143 if line.split(':', 1)[0].lower() in mimeheaders:
143 if line.split(':', 1)[0].lower() in mimeheaders:
144 # let email parser handle this
144 # let email parser handle this
145 return mimesplit(stream, cur)
145 return mimesplit(stream, cur)
146 elif line.startswith('--- ') and inheader:
146 elif line.startswith('--- ') and inheader:
147 # No evil headers seen by diff start, split by hand
147 # No evil headers seen by diff start, split by hand
148 return headersplit(stream, cur)
148 return headersplit(stream, cur)
149 # Not enough info, keep reading
149 # Not enough info, keep reading
150
150
151 # if we are here, we have a very plain patch
151 # if we are here, we have a very plain patch
152 return remainder(cur)
152 return remainder(cur)
153
153
154 ## Some facility for extensible patch parsing:
154 ## Some facility for extensible patch parsing:
155 # list of pairs ("header to match", "data key")
155 # list of pairs ("header to match", "data key")
156 patchheadermap = []
156 patchheadermap = [('Date', 'date')]
157
157
158 def extract(ui, fileobj):
158 def extract(ui, fileobj):
159 '''extract patch from data read from fileobj.
159 '''extract patch from data read from fileobj.
160
160
161 patch can be a normal patch or contained in an email message.
161 patch can be a normal patch or contained in an email message.
162
162
163 return a dictionary. Standard keys are:
163 return a dictionary. Standard keys are:
164 - filename,
164 - filename,
165 - message,
165 - message,
166 - user,
166 - user,
167 - date,
167 - date,
168 - branch,
168 - branch,
169 - node,
169 - node,
170 - p1,
170 - p1,
171 - p2.
171 - p2.
172 Any item can be missing from the dictionary. If filename is missing,
172 Any item can be missing from the dictionary. If filename is missing,
173 fileobj did not contain a patch. Caller must unlink filename when done.'''
173 fileobj did not contain a patch. Caller must unlink filename when done.'''
174
174
175 # attempt to detect the start of a patch
175 # attempt to detect the start of a patch
176 # (this heuristic is borrowed from quilt)
176 # (this heuristic is borrowed from quilt)
177 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
177 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
178 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
178 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
179 r'---[ \t].*?^\+\+\+[ \t]|'
179 r'---[ \t].*?^\+\+\+[ \t]|'
180 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
180 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
181
181
182 data = {}
182 data = {}
183 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
183 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
184 tmpfp = os.fdopen(fd, 'w')
184 tmpfp = os.fdopen(fd, 'w')
185 try:
185 try:
186 msg = email.Parser.Parser().parse(fileobj)
186 msg = email.Parser.Parser().parse(fileobj)
187
187
188 subject = msg['Subject']
188 subject = msg['Subject']
189 data['user'] = msg['From']
189 data['user'] = msg['From']
190 if not subject and not data['user']:
190 if not subject and not data['user']:
191 # Not an email, restore parsed headers if any
191 # Not an email, restore parsed headers if any
192 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
192 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
193
193
194 # should try to parse msg['Date']
194 # should try to parse msg['Date']
195 parents = []
195 parents = []
196
196
197 if subject:
197 if subject:
198 if subject.startswith('[PATCH'):
198 if subject.startswith('[PATCH'):
199 pend = subject.find(']')
199 pend = subject.find(']')
200 if pend >= 0:
200 if pend >= 0:
201 subject = subject[pend + 1:].lstrip()
201 subject = subject[pend + 1:].lstrip()
202 subject = re.sub(r'\n[ \t]+', ' ', subject)
202 subject = re.sub(r'\n[ \t]+', ' ', subject)
203 ui.debug('Subject: %s\n' % subject)
203 ui.debug('Subject: %s\n' % subject)
204 if data['user']:
204 if data['user']:
205 ui.debug('From: %s\n' % data['user'])
205 ui.debug('From: %s\n' % data['user'])
206 diffs_seen = 0
206 diffs_seen = 0
207 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
207 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
208 message = ''
208 message = ''
209 for part in msg.walk():
209 for part in msg.walk():
210 content_type = part.get_content_type()
210 content_type = part.get_content_type()
211 ui.debug('Content-Type: %s\n' % content_type)
211 ui.debug('Content-Type: %s\n' % content_type)
212 if content_type not in ok_types:
212 if content_type not in ok_types:
213 continue
213 continue
214 payload = part.get_payload(decode=True)
214 payload = part.get_payload(decode=True)
215 m = diffre.search(payload)
215 m = diffre.search(payload)
216 if m:
216 if m:
217 hgpatch = False
217 hgpatch = False
218 hgpatchheader = False
218 hgpatchheader = False
219 ignoretext = False
219 ignoretext = False
220
220
221 ui.debug('found patch at byte %d\n' % m.start(0))
221 ui.debug('found patch at byte %d\n' % m.start(0))
222 diffs_seen += 1
222 diffs_seen += 1
223 cfp = cStringIO.StringIO()
223 cfp = cStringIO.StringIO()
224 for line in payload[:m.start(0)].splitlines():
224 for line in payload[:m.start(0)].splitlines():
225 if line.startswith('# HG changeset patch') and not hgpatch:
225 if line.startswith('# HG changeset patch') and not hgpatch:
226 ui.debug('patch generated by hg export\n')
226 ui.debug('patch generated by hg export\n')
227 hgpatch = True
227 hgpatch = True
228 hgpatchheader = True
228 hgpatchheader = True
229 # drop earlier commit message content
229 # drop earlier commit message content
230 cfp.seek(0)
230 cfp.seek(0)
231 cfp.truncate()
231 cfp.truncate()
232 subject = None
232 subject = None
233 elif hgpatchheader:
233 elif hgpatchheader:
234 if line.startswith('# User '):
234 if line.startswith('# User '):
235 data['user'] = line[7:]
235 data['user'] = line[7:]
236 ui.debug('From: %s\n' % data['user'])
236 ui.debug('From: %s\n' % data['user'])
237 elif line.startswith("# Date "):
238 data['date'] = line[7:]
239 elif line.startswith("# Branch "):
237 elif line.startswith("# Branch "):
240 data['branch'] = line[9:]
238 data['branch'] = line[9:]
241 elif line.startswith("# Node ID "):
239 elif line.startswith("# Node ID "):
242 data['nodeid'] = line[10:]
240 data['nodeid'] = line[10:]
243 elif line.startswith("# Parent "):
241 elif line.startswith("# Parent "):
244 parents.append(line[9:].lstrip())
242 parents.append(line[9:].lstrip())
245 elif line.startswith("# "):
243 elif line.startswith("# "):
246 for header, key in patchheadermap:
244 for header, key in patchheadermap:
247 prefix = '# %s ' % header
245 prefix = '# %s ' % header
248 if line.startswith(prefix):
246 if line.startswith(prefix):
249 data[key] = line[len(prefix):]
247 data[key] = line[len(prefix):]
250 else:
248 else:
251 hgpatchheader = False
249 hgpatchheader = False
252 elif line == '---':
250 elif line == '---':
253 ignoretext = True
251 ignoretext = True
254 if not hgpatchheader and not ignoretext:
252 if not hgpatchheader and not ignoretext:
255 cfp.write(line)
253 cfp.write(line)
256 cfp.write('\n')
254 cfp.write('\n')
257 message = cfp.getvalue()
255 message = cfp.getvalue()
258 if tmpfp:
256 if tmpfp:
259 tmpfp.write(payload)
257 tmpfp.write(payload)
260 if not payload.endswith('\n'):
258 if not payload.endswith('\n'):
261 tmpfp.write('\n')
259 tmpfp.write('\n')
262 elif not diffs_seen and message and content_type == 'text/plain':
260 elif not diffs_seen and message and content_type == 'text/plain':
263 message += '\n' + payload
261 message += '\n' + payload
264 except: # re-raises
262 except: # re-raises
265 tmpfp.close()
263 tmpfp.close()
266 os.unlink(tmpname)
264 os.unlink(tmpname)
267 raise
265 raise
268
266
269 if subject and not message.startswith(subject):
267 if subject and not message.startswith(subject):
270 message = '%s\n%s' % (subject, message)
268 message = '%s\n%s' % (subject, message)
271 data['message'] = message
269 data['message'] = message
272 tmpfp.close()
270 tmpfp.close()
273 if parents:
271 if parents:
274 data['p1'] = parents.pop(0)
272 data['p1'] = parents.pop(0)
275 if parents:
273 if parents:
276 data['p2'] = parents.pop(0)
274 data['p2'] = parents.pop(0)
277
275
278 if diffs_seen:
276 if diffs_seen:
279 data['filename'] = tmpname
277 data['filename'] = tmpname
280 else:
278 else:
281 os.unlink(tmpname)
279 os.unlink(tmpname)
282 return data
280 return data
283
281
284 class patchmeta(object):
282 class patchmeta(object):
285 """Patched file metadata
283 """Patched file metadata
286
284
287 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
285 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
288 or COPY. 'path' is patched file path. 'oldpath' is set to the
286 or COPY. 'path' is patched file path. 'oldpath' is set to the
289 origin file when 'op' is either COPY or RENAME, None otherwise. If
287 origin file when 'op' is either COPY or RENAME, None otherwise. If
290 file mode is changed, 'mode' is a tuple (islink, isexec) where
288 file mode is changed, 'mode' is a tuple (islink, isexec) where
291 'islink' is True if the file is a symlink and 'isexec' is True if
289 'islink' is True if the file is a symlink and 'isexec' is True if
292 the file is executable. Otherwise, 'mode' is None.
290 the file is executable. Otherwise, 'mode' is None.
293 """
291 """
294 def __init__(self, path):
292 def __init__(self, path):
295 self.path = path
293 self.path = path
296 self.oldpath = None
294 self.oldpath = None
297 self.mode = None
295 self.mode = None
298 self.op = 'MODIFY'
296 self.op = 'MODIFY'
299 self.binary = False
297 self.binary = False
300
298
301 def setmode(self, mode):
299 def setmode(self, mode):
302 islink = mode & 0o20000
300 islink = mode & 0o20000
303 isexec = mode & 0o100
301 isexec = mode & 0o100
304 self.mode = (islink, isexec)
302 self.mode = (islink, isexec)
305
303
306 def copy(self):
304 def copy(self):
307 other = patchmeta(self.path)
305 other = patchmeta(self.path)
308 other.oldpath = self.oldpath
306 other.oldpath = self.oldpath
309 other.mode = self.mode
307 other.mode = self.mode
310 other.op = self.op
308 other.op = self.op
311 other.binary = self.binary
309 other.binary = self.binary
312 return other
310 return other
313
311
314 def _ispatchinga(self, afile):
312 def _ispatchinga(self, afile):
315 if afile == '/dev/null':
313 if afile == '/dev/null':
316 return self.op == 'ADD'
314 return self.op == 'ADD'
317 return afile == 'a/' + (self.oldpath or self.path)
315 return afile == 'a/' + (self.oldpath or self.path)
318
316
319 def _ispatchingb(self, bfile):
317 def _ispatchingb(self, bfile):
320 if bfile == '/dev/null':
318 if bfile == '/dev/null':
321 return self.op == 'DELETE'
319 return self.op == 'DELETE'
322 return bfile == 'b/' + self.path
320 return bfile == 'b/' + self.path
323
321
324 def ispatching(self, afile, bfile):
322 def ispatching(self, afile, bfile):
325 return self._ispatchinga(afile) and self._ispatchingb(bfile)
323 return self._ispatchinga(afile) and self._ispatchingb(bfile)
326
324
327 def __repr__(self):
325 def __repr__(self):
328 return "<patchmeta %s %r>" % (self.op, self.path)
326 return "<patchmeta %s %r>" % (self.op, self.path)
329
327
330 def readgitpatch(lr):
328 def readgitpatch(lr):
331 """extract git-style metadata about patches from <patchname>"""
329 """extract git-style metadata about patches from <patchname>"""
332
330
333 # Filter patch for git information
331 # Filter patch for git information
334 gp = None
332 gp = None
335 gitpatches = []
333 gitpatches = []
336 for line in lr:
334 for line in lr:
337 line = line.rstrip(' \r\n')
335 line = line.rstrip(' \r\n')
338 if line.startswith('diff --git a/'):
336 if line.startswith('diff --git a/'):
339 m = gitre.match(line)
337 m = gitre.match(line)
340 if m:
338 if m:
341 if gp:
339 if gp:
342 gitpatches.append(gp)
340 gitpatches.append(gp)
343 dst = m.group(2)
341 dst = m.group(2)
344 gp = patchmeta(dst)
342 gp = patchmeta(dst)
345 elif gp:
343 elif gp:
346 if line.startswith('--- '):
344 if line.startswith('--- '):
347 gitpatches.append(gp)
345 gitpatches.append(gp)
348 gp = None
346 gp = None
349 continue
347 continue
350 if line.startswith('rename from '):
348 if line.startswith('rename from '):
351 gp.op = 'RENAME'
349 gp.op = 'RENAME'
352 gp.oldpath = line[12:]
350 gp.oldpath = line[12:]
353 elif line.startswith('rename to '):
351 elif line.startswith('rename to '):
354 gp.path = line[10:]
352 gp.path = line[10:]
355 elif line.startswith('copy from '):
353 elif line.startswith('copy from '):
356 gp.op = 'COPY'
354 gp.op = 'COPY'
357 gp.oldpath = line[10:]
355 gp.oldpath = line[10:]
358 elif line.startswith('copy to '):
356 elif line.startswith('copy to '):
359 gp.path = line[8:]
357 gp.path = line[8:]
360 elif line.startswith('deleted file'):
358 elif line.startswith('deleted file'):
361 gp.op = 'DELETE'
359 gp.op = 'DELETE'
362 elif line.startswith('new file mode '):
360 elif line.startswith('new file mode '):
363 gp.op = 'ADD'
361 gp.op = 'ADD'
364 gp.setmode(int(line[-6:], 8))
362 gp.setmode(int(line[-6:], 8))
365 elif line.startswith('new mode '):
363 elif line.startswith('new mode '):
366 gp.setmode(int(line[-6:], 8))
364 gp.setmode(int(line[-6:], 8))
367 elif line.startswith('GIT binary patch'):
365 elif line.startswith('GIT binary patch'):
368 gp.binary = True
366 gp.binary = True
369 if gp:
367 if gp:
370 gitpatches.append(gp)
368 gitpatches.append(gp)
371
369
372 return gitpatches
370 return gitpatches
373
371
374 class linereader(object):
372 class linereader(object):
375 # simple class to allow pushing lines back into the input stream
373 # simple class to allow pushing lines back into the input stream
376 def __init__(self, fp):
374 def __init__(self, fp):
377 self.fp = fp
375 self.fp = fp
378 self.buf = []
376 self.buf = []
379
377
380 def push(self, line):
378 def push(self, line):
381 if line is not None:
379 if line is not None:
382 self.buf.append(line)
380 self.buf.append(line)
383
381
384 def readline(self):
382 def readline(self):
385 if self.buf:
383 if self.buf:
386 l = self.buf[0]
384 l = self.buf[0]
387 del self.buf[0]
385 del self.buf[0]
388 return l
386 return l
389 return self.fp.readline()
387 return self.fp.readline()
390
388
391 def __iter__(self):
389 def __iter__(self):
392 while True:
390 while True:
393 l = self.readline()
391 l = self.readline()
394 if not l:
392 if not l:
395 break
393 break
396 yield l
394 yield l
397
395
398 class abstractbackend(object):
396 class abstractbackend(object):
399 def __init__(self, ui):
397 def __init__(self, ui):
400 self.ui = ui
398 self.ui = ui
401
399
402 def getfile(self, fname):
400 def getfile(self, fname):
403 """Return target file data and flags as a (data, (islink,
401 """Return target file data and flags as a (data, (islink,
404 isexec)) tuple. Data is None if file is missing/deleted.
402 isexec)) tuple. Data is None if file is missing/deleted.
405 """
403 """
406 raise NotImplementedError
404 raise NotImplementedError
407
405
408 def setfile(self, fname, data, mode, copysource):
406 def setfile(self, fname, data, mode, copysource):
409 """Write data to target file fname and set its mode. mode is a
407 """Write data to target file fname and set its mode. mode is a
410 (islink, isexec) tuple. If data is None, the file content should
408 (islink, isexec) tuple. If data is None, the file content should
411 be left unchanged. If the file is modified after being copied,
409 be left unchanged. If the file is modified after being copied,
412 copysource is set to the original file name.
410 copysource is set to the original file name.
413 """
411 """
414 raise NotImplementedError
412 raise NotImplementedError
415
413
416 def unlink(self, fname):
414 def unlink(self, fname):
417 """Unlink target file."""
415 """Unlink target file."""
418 raise NotImplementedError
416 raise NotImplementedError
419
417
420 def writerej(self, fname, failed, total, lines):
418 def writerej(self, fname, failed, total, lines):
421 """Write rejected lines for fname. total is the number of hunks
419 """Write rejected lines for fname. total is the number of hunks
422 which failed to apply and total the total number of hunks for this
420 which failed to apply and total the total number of hunks for this
423 files.
421 files.
424 """
422 """
425 pass
423 pass
426
424
427 def exists(self, fname):
425 def exists(self, fname):
428 raise NotImplementedError
426 raise NotImplementedError
429
427
430 class fsbackend(abstractbackend):
428 class fsbackend(abstractbackend):
431 def __init__(self, ui, basedir):
429 def __init__(self, ui, basedir):
432 super(fsbackend, self).__init__(ui)
430 super(fsbackend, self).__init__(ui)
433 self.opener = scmutil.opener(basedir)
431 self.opener = scmutil.opener(basedir)
434
432
435 def _join(self, f):
433 def _join(self, f):
436 return os.path.join(self.opener.base, f)
434 return os.path.join(self.opener.base, f)
437
435
438 def getfile(self, fname):
436 def getfile(self, fname):
439 if self.opener.islink(fname):
437 if self.opener.islink(fname):
440 return (self.opener.readlink(fname), (True, False))
438 return (self.opener.readlink(fname), (True, False))
441
439
442 isexec = False
440 isexec = False
443 try:
441 try:
444 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
442 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
445 except OSError as e:
443 except OSError as e:
446 if e.errno != errno.ENOENT:
444 if e.errno != errno.ENOENT:
447 raise
445 raise
448 try:
446 try:
449 return (self.opener.read(fname), (False, isexec))
447 return (self.opener.read(fname), (False, isexec))
450 except IOError as e:
448 except IOError as e:
451 if e.errno != errno.ENOENT:
449 if e.errno != errno.ENOENT:
452 raise
450 raise
453 return None, None
451 return None, None
454
452
455 def setfile(self, fname, data, mode, copysource):
453 def setfile(self, fname, data, mode, copysource):
456 islink, isexec = mode
454 islink, isexec = mode
457 if data is None:
455 if data is None:
458 self.opener.setflags(fname, islink, isexec)
456 self.opener.setflags(fname, islink, isexec)
459 return
457 return
460 if islink:
458 if islink:
461 self.opener.symlink(data, fname)
459 self.opener.symlink(data, fname)
462 else:
460 else:
463 self.opener.write(fname, data)
461 self.opener.write(fname, data)
464 if isexec:
462 if isexec:
465 self.opener.setflags(fname, False, True)
463 self.opener.setflags(fname, False, True)
466
464
467 def unlink(self, fname):
465 def unlink(self, fname):
468 self.opener.unlinkpath(fname, ignoremissing=True)
466 self.opener.unlinkpath(fname, ignoremissing=True)
469
467
470 def writerej(self, fname, failed, total, lines):
468 def writerej(self, fname, failed, total, lines):
471 fname = fname + ".rej"
469 fname = fname + ".rej"
472 self.ui.warn(
470 self.ui.warn(
473 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
471 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
474 (failed, total, fname))
472 (failed, total, fname))
475 fp = self.opener(fname, 'w')
473 fp = self.opener(fname, 'w')
476 fp.writelines(lines)
474 fp.writelines(lines)
477 fp.close()
475 fp.close()
478
476
479 def exists(self, fname):
477 def exists(self, fname):
480 return self.opener.lexists(fname)
478 return self.opener.lexists(fname)
481
479
482 class workingbackend(fsbackend):
480 class workingbackend(fsbackend):
483 def __init__(self, ui, repo, similarity):
481 def __init__(self, ui, repo, similarity):
484 super(workingbackend, self).__init__(ui, repo.root)
482 super(workingbackend, self).__init__(ui, repo.root)
485 self.repo = repo
483 self.repo = repo
486 self.similarity = similarity
484 self.similarity = similarity
487 self.removed = set()
485 self.removed = set()
488 self.changed = set()
486 self.changed = set()
489 self.copied = []
487 self.copied = []
490
488
491 def _checkknown(self, fname):
489 def _checkknown(self, fname):
492 if self.repo.dirstate[fname] == '?' and self.exists(fname):
490 if self.repo.dirstate[fname] == '?' and self.exists(fname):
493 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
491 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
494
492
495 def setfile(self, fname, data, mode, copysource):
493 def setfile(self, fname, data, mode, copysource):
496 self._checkknown(fname)
494 self._checkknown(fname)
497 super(workingbackend, self).setfile(fname, data, mode, copysource)
495 super(workingbackend, self).setfile(fname, data, mode, copysource)
498 if copysource is not None:
496 if copysource is not None:
499 self.copied.append((copysource, fname))
497 self.copied.append((copysource, fname))
500 self.changed.add(fname)
498 self.changed.add(fname)
501
499
502 def unlink(self, fname):
500 def unlink(self, fname):
503 self._checkknown(fname)
501 self._checkknown(fname)
504 super(workingbackend, self).unlink(fname)
502 super(workingbackend, self).unlink(fname)
505 self.removed.add(fname)
503 self.removed.add(fname)
506 self.changed.add(fname)
504 self.changed.add(fname)
507
505
508 def close(self):
506 def close(self):
509 wctx = self.repo[None]
507 wctx = self.repo[None]
510 changed = set(self.changed)
508 changed = set(self.changed)
511 for src, dst in self.copied:
509 for src, dst in self.copied:
512 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
510 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
513 if self.removed:
511 if self.removed:
514 wctx.forget(sorted(self.removed))
512 wctx.forget(sorted(self.removed))
515 for f in self.removed:
513 for f in self.removed:
516 if f not in self.repo.dirstate:
514 if f not in self.repo.dirstate:
517 # File was deleted and no longer belongs to the
515 # File was deleted and no longer belongs to the
518 # dirstate, it was probably marked added then
516 # dirstate, it was probably marked added then
519 # deleted, and should not be considered by
517 # deleted, and should not be considered by
520 # marktouched().
518 # marktouched().
521 changed.discard(f)
519 changed.discard(f)
522 if changed:
520 if changed:
523 scmutil.marktouched(self.repo, changed, self.similarity)
521 scmutil.marktouched(self.repo, changed, self.similarity)
524 return sorted(self.changed)
522 return sorted(self.changed)
525
523
526 class filestore(object):
524 class filestore(object):
527 def __init__(self, maxsize=None):
525 def __init__(self, maxsize=None):
528 self.opener = None
526 self.opener = None
529 self.files = {}
527 self.files = {}
530 self.created = 0
528 self.created = 0
531 self.maxsize = maxsize
529 self.maxsize = maxsize
532 if self.maxsize is None:
530 if self.maxsize is None:
533 self.maxsize = 4*(2**20)
531 self.maxsize = 4*(2**20)
534 self.size = 0
532 self.size = 0
535 self.data = {}
533 self.data = {}
536
534
537 def setfile(self, fname, data, mode, copied=None):
535 def setfile(self, fname, data, mode, copied=None):
538 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
536 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
539 self.data[fname] = (data, mode, copied)
537 self.data[fname] = (data, mode, copied)
540 self.size += len(data)
538 self.size += len(data)
541 else:
539 else:
542 if self.opener is None:
540 if self.opener is None:
543 root = tempfile.mkdtemp(prefix='hg-patch-')
541 root = tempfile.mkdtemp(prefix='hg-patch-')
544 self.opener = scmutil.opener(root)
542 self.opener = scmutil.opener(root)
545 # Avoid filename issues with these simple names
543 # Avoid filename issues with these simple names
546 fn = str(self.created)
544 fn = str(self.created)
547 self.opener.write(fn, data)
545 self.opener.write(fn, data)
548 self.created += 1
546 self.created += 1
549 self.files[fname] = (fn, mode, copied)
547 self.files[fname] = (fn, mode, copied)
550
548
551 def getfile(self, fname):
549 def getfile(self, fname):
552 if fname in self.data:
550 if fname in self.data:
553 return self.data[fname]
551 return self.data[fname]
554 if not self.opener or fname not in self.files:
552 if not self.opener or fname not in self.files:
555 return None, None, None
553 return None, None, None
556 fn, mode, copied = self.files[fname]
554 fn, mode, copied = self.files[fname]
557 return self.opener.read(fn), mode, copied
555 return self.opener.read(fn), mode, copied
558
556
559 def close(self):
557 def close(self):
560 if self.opener:
558 if self.opener:
561 shutil.rmtree(self.opener.base)
559 shutil.rmtree(self.opener.base)
562
560
563 class repobackend(abstractbackend):
561 class repobackend(abstractbackend):
564 def __init__(self, ui, repo, ctx, store):
562 def __init__(self, ui, repo, ctx, store):
565 super(repobackend, self).__init__(ui)
563 super(repobackend, self).__init__(ui)
566 self.repo = repo
564 self.repo = repo
567 self.ctx = ctx
565 self.ctx = ctx
568 self.store = store
566 self.store = store
569 self.changed = set()
567 self.changed = set()
570 self.removed = set()
568 self.removed = set()
571 self.copied = {}
569 self.copied = {}
572
570
573 def _checkknown(self, fname):
571 def _checkknown(self, fname):
574 if fname not in self.ctx:
572 if fname not in self.ctx:
575 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
573 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
576
574
577 def getfile(self, fname):
575 def getfile(self, fname):
578 try:
576 try:
579 fctx = self.ctx[fname]
577 fctx = self.ctx[fname]
580 except error.LookupError:
578 except error.LookupError:
581 return None, None
579 return None, None
582 flags = fctx.flags()
580 flags = fctx.flags()
583 return fctx.data(), ('l' in flags, 'x' in flags)
581 return fctx.data(), ('l' in flags, 'x' in flags)
584
582
585 def setfile(self, fname, data, mode, copysource):
583 def setfile(self, fname, data, mode, copysource):
586 if copysource:
584 if copysource:
587 self._checkknown(copysource)
585 self._checkknown(copysource)
588 if data is None:
586 if data is None:
589 data = self.ctx[fname].data()
587 data = self.ctx[fname].data()
590 self.store.setfile(fname, data, mode, copysource)
588 self.store.setfile(fname, data, mode, copysource)
591 self.changed.add(fname)
589 self.changed.add(fname)
592 if copysource:
590 if copysource:
593 self.copied[fname] = copysource
591 self.copied[fname] = copysource
594
592
595 def unlink(self, fname):
593 def unlink(self, fname):
596 self._checkknown(fname)
594 self._checkknown(fname)
597 self.removed.add(fname)
595 self.removed.add(fname)
598
596
599 def exists(self, fname):
597 def exists(self, fname):
600 return fname in self.ctx
598 return fname in self.ctx
601
599
602 def close(self):
600 def close(self):
603 return self.changed | self.removed
601 return self.changed | self.removed
604
602
605 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
603 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
606 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
604 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
607 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
605 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
608 eolmodes = ['strict', 'crlf', 'lf', 'auto']
606 eolmodes = ['strict', 'crlf', 'lf', 'auto']
609
607
610 class patchfile(object):
608 class patchfile(object):
611 def __init__(self, ui, gp, backend, store, eolmode='strict'):
609 def __init__(self, ui, gp, backend, store, eolmode='strict'):
612 self.fname = gp.path
610 self.fname = gp.path
613 self.eolmode = eolmode
611 self.eolmode = eolmode
614 self.eol = None
612 self.eol = None
615 self.backend = backend
613 self.backend = backend
616 self.ui = ui
614 self.ui = ui
617 self.lines = []
615 self.lines = []
618 self.exists = False
616 self.exists = False
619 self.missing = True
617 self.missing = True
620 self.mode = gp.mode
618 self.mode = gp.mode
621 self.copysource = gp.oldpath
619 self.copysource = gp.oldpath
622 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
620 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
623 self.remove = gp.op == 'DELETE'
621 self.remove = gp.op == 'DELETE'
624 if self.copysource is None:
622 if self.copysource is None:
625 data, mode = backend.getfile(self.fname)
623 data, mode = backend.getfile(self.fname)
626 else:
624 else:
627 data, mode = store.getfile(self.copysource)[:2]
625 data, mode = store.getfile(self.copysource)[:2]
628 if data is not None:
626 if data is not None:
629 self.exists = self.copysource is None or backend.exists(self.fname)
627 self.exists = self.copysource is None or backend.exists(self.fname)
630 self.missing = False
628 self.missing = False
631 if data:
629 if data:
632 self.lines = mdiff.splitnewlines(data)
630 self.lines = mdiff.splitnewlines(data)
633 if self.mode is None:
631 if self.mode is None:
634 self.mode = mode
632 self.mode = mode
635 if self.lines:
633 if self.lines:
636 # Normalize line endings
634 # Normalize line endings
637 if self.lines[0].endswith('\r\n'):
635 if self.lines[0].endswith('\r\n'):
638 self.eol = '\r\n'
636 self.eol = '\r\n'
639 elif self.lines[0].endswith('\n'):
637 elif self.lines[0].endswith('\n'):
640 self.eol = '\n'
638 self.eol = '\n'
641 if eolmode != 'strict':
639 if eolmode != 'strict':
642 nlines = []
640 nlines = []
643 for l in self.lines:
641 for l in self.lines:
644 if l.endswith('\r\n'):
642 if l.endswith('\r\n'):
645 l = l[:-2] + '\n'
643 l = l[:-2] + '\n'
646 nlines.append(l)
644 nlines.append(l)
647 self.lines = nlines
645 self.lines = nlines
648 else:
646 else:
649 if self.create:
647 if self.create:
650 self.missing = False
648 self.missing = False
651 if self.mode is None:
649 if self.mode is None:
652 self.mode = (False, False)
650 self.mode = (False, False)
653 if self.missing:
651 if self.missing:
654 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
652 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
655
653
656 self.hash = {}
654 self.hash = {}
657 self.dirty = 0
655 self.dirty = 0
658 self.offset = 0
656 self.offset = 0
659 self.skew = 0
657 self.skew = 0
660 self.rej = []
658 self.rej = []
661 self.fileprinted = False
659 self.fileprinted = False
662 self.printfile(False)
660 self.printfile(False)
663 self.hunks = 0
661 self.hunks = 0
664
662
665 def writelines(self, fname, lines, mode):
663 def writelines(self, fname, lines, mode):
666 if self.eolmode == 'auto':
664 if self.eolmode == 'auto':
667 eol = self.eol
665 eol = self.eol
668 elif self.eolmode == 'crlf':
666 elif self.eolmode == 'crlf':
669 eol = '\r\n'
667 eol = '\r\n'
670 else:
668 else:
671 eol = '\n'
669 eol = '\n'
672
670
673 if self.eolmode != 'strict' and eol and eol != '\n':
671 if self.eolmode != 'strict' and eol and eol != '\n':
674 rawlines = []
672 rawlines = []
675 for l in lines:
673 for l in lines:
676 if l and l[-1] == '\n':
674 if l and l[-1] == '\n':
677 l = l[:-1] + eol
675 l = l[:-1] + eol
678 rawlines.append(l)
676 rawlines.append(l)
679 lines = rawlines
677 lines = rawlines
680
678
681 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
679 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
682
680
683 def printfile(self, warn):
681 def printfile(self, warn):
684 if self.fileprinted:
682 if self.fileprinted:
685 return
683 return
686 if warn or self.ui.verbose:
684 if warn or self.ui.verbose:
687 self.fileprinted = True
685 self.fileprinted = True
688 s = _("patching file %s\n") % self.fname
686 s = _("patching file %s\n") % self.fname
689 if warn:
687 if warn:
690 self.ui.warn(s)
688 self.ui.warn(s)
691 else:
689 else:
692 self.ui.note(s)
690 self.ui.note(s)
693
691
694
692
695 def findlines(self, l, linenum):
693 def findlines(self, l, linenum):
696 # looks through the hash and finds candidate lines. The
694 # looks through the hash and finds candidate lines. The
697 # result is a list of line numbers sorted based on distance
695 # result is a list of line numbers sorted based on distance
698 # from linenum
696 # from linenum
699
697
700 cand = self.hash.get(l, [])
698 cand = self.hash.get(l, [])
701 if len(cand) > 1:
699 if len(cand) > 1:
702 # resort our list of potentials forward then back.
700 # resort our list of potentials forward then back.
703 cand.sort(key=lambda x: abs(x - linenum))
701 cand.sort(key=lambda x: abs(x - linenum))
704 return cand
702 return cand
705
703
706 def write_rej(self):
704 def write_rej(self):
707 # our rejects are a little different from patch(1). This always
705 # our rejects are a little different from patch(1). This always
708 # creates rejects in the same form as the original patch. A file
706 # creates rejects in the same form as the original patch. A file
709 # header is inserted so that you can run the reject through patch again
707 # header is inserted so that you can run the reject through patch again
710 # without having to type the filename.
708 # without having to type the filename.
711 if not self.rej:
709 if not self.rej:
712 return
710 return
713 base = os.path.basename(self.fname)
711 base = os.path.basename(self.fname)
714 lines = ["--- %s\n+++ %s\n" % (base, base)]
712 lines = ["--- %s\n+++ %s\n" % (base, base)]
715 for x in self.rej:
713 for x in self.rej:
716 for l in x.hunk:
714 for l in x.hunk:
717 lines.append(l)
715 lines.append(l)
718 if l[-1] != '\n':
716 if l[-1] != '\n':
719 lines.append("\n\ No newline at end of file\n")
717 lines.append("\n\ No newline at end of file\n")
720 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
718 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
721
719
722 def apply(self, h):
720 def apply(self, h):
723 if not h.complete():
721 if not h.complete():
724 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
722 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
725 (h.number, h.desc, len(h.a), h.lena, len(h.b),
723 (h.number, h.desc, len(h.a), h.lena, len(h.b),
726 h.lenb))
724 h.lenb))
727
725
728 self.hunks += 1
726 self.hunks += 1
729
727
730 if self.missing:
728 if self.missing:
731 self.rej.append(h)
729 self.rej.append(h)
732 return -1
730 return -1
733
731
734 if self.exists and self.create:
732 if self.exists and self.create:
735 if self.copysource:
733 if self.copysource:
736 self.ui.warn(_("cannot create %s: destination already "
734 self.ui.warn(_("cannot create %s: destination already "
737 "exists\n") % self.fname)
735 "exists\n") % self.fname)
738 else:
736 else:
739 self.ui.warn(_("file %s already exists\n") % self.fname)
737 self.ui.warn(_("file %s already exists\n") % self.fname)
740 self.rej.append(h)
738 self.rej.append(h)
741 return -1
739 return -1
742
740
743 if isinstance(h, binhunk):
741 if isinstance(h, binhunk):
744 if self.remove:
742 if self.remove:
745 self.backend.unlink(self.fname)
743 self.backend.unlink(self.fname)
746 else:
744 else:
747 l = h.new(self.lines)
745 l = h.new(self.lines)
748 self.lines[:] = l
746 self.lines[:] = l
749 self.offset += len(l)
747 self.offset += len(l)
750 self.dirty = True
748 self.dirty = True
751 return 0
749 return 0
752
750
753 horig = h
751 horig = h
754 if (self.eolmode in ('crlf', 'lf')
752 if (self.eolmode in ('crlf', 'lf')
755 or self.eolmode == 'auto' and self.eol):
753 or self.eolmode == 'auto' and self.eol):
756 # If new eols are going to be normalized, then normalize
754 # If new eols are going to be normalized, then normalize
757 # hunk data before patching. Otherwise, preserve input
755 # hunk data before patching. Otherwise, preserve input
758 # line-endings.
756 # line-endings.
759 h = h.getnormalized()
757 h = h.getnormalized()
760
758
761 # fast case first, no offsets, no fuzz
759 # fast case first, no offsets, no fuzz
762 old, oldstart, new, newstart = h.fuzzit(0, False)
760 old, oldstart, new, newstart = h.fuzzit(0, False)
763 oldstart += self.offset
761 oldstart += self.offset
764 orig_start = oldstart
762 orig_start = oldstart
765 # if there's skew we want to emit the "(offset %d lines)" even
763 # if there's skew we want to emit the "(offset %d lines)" even
766 # when the hunk cleanly applies at start + skew, so skip the
764 # when the hunk cleanly applies at start + skew, so skip the
767 # fast case code
765 # fast case code
768 if (self.skew == 0 and
766 if (self.skew == 0 and
769 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
767 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
770 if self.remove:
768 if self.remove:
771 self.backend.unlink(self.fname)
769 self.backend.unlink(self.fname)
772 else:
770 else:
773 self.lines[oldstart:oldstart + len(old)] = new
771 self.lines[oldstart:oldstart + len(old)] = new
774 self.offset += len(new) - len(old)
772 self.offset += len(new) - len(old)
775 self.dirty = True
773 self.dirty = True
776 return 0
774 return 0
777
775
778 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
776 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
779 self.hash = {}
777 self.hash = {}
780 for x, s in enumerate(self.lines):
778 for x, s in enumerate(self.lines):
781 self.hash.setdefault(s, []).append(x)
779 self.hash.setdefault(s, []).append(x)
782
780
783 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
781 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
784 for toponly in [True, False]:
782 for toponly in [True, False]:
785 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
783 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
786 oldstart = oldstart + self.offset + self.skew
784 oldstart = oldstart + self.offset + self.skew
787 oldstart = min(oldstart, len(self.lines))
785 oldstart = min(oldstart, len(self.lines))
788 if old:
786 if old:
789 cand = self.findlines(old[0][1:], oldstart)
787 cand = self.findlines(old[0][1:], oldstart)
790 else:
788 else:
791 # Only adding lines with no or fuzzed context, just
789 # Only adding lines with no or fuzzed context, just
792 # take the skew in account
790 # take the skew in account
793 cand = [oldstart]
791 cand = [oldstart]
794
792
795 for l in cand:
793 for l in cand:
796 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
794 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
797 self.lines[l : l + len(old)] = new
795 self.lines[l : l + len(old)] = new
798 self.offset += len(new) - len(old)
796 self.offset += len(new) - len(old)
799 self.skew = l - orig_start
797 self.skew = l - orig_start
800 self.dirty = True
798 self.dirty = True
801 offset = l - orig_start - fuzzlen
799 offset = l - orig_start - fuzzlen
802 if fuzzlen:
800 if fuzzlen:
803 msg = _("Hunk #%d succeeded at %d "
801 msg = _("Hunk #%d succeeded at %d "
804 "with fuzz %d "
802 "with fuzz %d "
805 "(offset %d lines).\n")
803 "(offset %d lines).\n")
806 self.printfile(True)
804 self.printfile(True)
807 self.ui.warn(msg %
805 self.ui.warn(msg %
808 (h.number, l + 1, fuzzlen, offset))
806 (h.number, l + 1, fuzzlen, offset))
809 else:
807 else:
810 msg = _("Hunk #%d succeeded at %d "
808 msg = _("Hunk #%d succeeded at %d "
811 "(offset %d lines).\n")
809 "(offset %d lines).\n")
812 self.ui.note(msg % (h.number, l + 1, offset))
810 self.ui.note(msg % (h.number, l + 1, offset))
813 return fuzzlen
811 return fuzzlen
814 self.printfile(True)
812 self.printfile(True)
815 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
813 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
816 self.rej.append(horig)
814 self.rej.append(horig)
817 return -1
815 return -1
818
816
819 def close(self):
817 def close(self):
820 if self.dirty:
818 if self.dirty:
821 self.writelines(self.fname, self.lines, self.mode)
819 self.writelines(self.fname, self.lines, self.mode)
822 self.write_rej()
820 self.write_rej()
823 return len(self.rej)
821 return len(self.rej)
824
822
825 class header(object):
823 class header(object):
826 """patch header
824 """patch header
827 """
825 """
828 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
826 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
829 diff_re = re.compile('diff -r .* (.*)$')
827 diff_re = re.compile('diff -r .* (.*)$')
830 allhunks_re = re.compile('(?:index|deleted file) ')
828 allhunks_re = re.compile('(?:index|deleted file) ')
831 pretty_re = re.compile('(?:new file|deleted file) ')
829 pretty_re = re.compile('(?:new file|deleted file) ')
832 special_re = re.compile('(?:index|deleted|copy|rename) ')
830 special_re = re.compile('(?:index|deleted|copy|rename) ')
833 newfile_re = re.compile('(?:new file)')
831 newfile_re = re.compile('(?:new file)')
834
832
835 def __init__(self, header):
833 def __init__(self, header):
836 self.header = header
834 self.header = header
837 self.hunks = []
835 self.hunks = []
838
836
839 def binary(self):
837 def binary(self):
840 return any(h.startswith('index ') for h in self.header)
838 return any(h.startswith('index ') for h in self.header)
841
839
842 def pretty(self, fp):
840 def pretty(self, fp):
843 for h in self.header:
841 for h in self.header:
844 if h.startswith('index '):
842 if h.startswith('index '):
845 fp.write(_('this modifies a binary file (all or nothing)\n'))
843 fp.write(_('this modifies a binary file (all or nothing)\n'))
846 break
844 break
847 if self.pretty_re.match(h):
845 if self.pretty_re.match(h):
848 fp.write(h)
846 fp.write(h)
849 if self.binary():
847 if self.binary():
850 fp.write(_('this is a binary file\n'))
848 fp.write(_('this is a binary file\n'))
851 break
849 break
852 if h.startswith('---'):
850 if h.startswith('---'):
853 fp.write(_('%d hunks, %d lines changed\n') %
851 fp.write(_('%d hunks, %d lines changed\n') %
854 (len(self.hunks),
852 (len(self.hunks),
855 sum([max(h.added, h.removed) for h in self.hunks])))
853 sum([max(h.added, h.removed) for h in self.hunks])))
856 break
854 break
857 fp.write(h)
855 fp.write(h)
858
856
859 def write(self, fp):
857 def write(self, fp):
860 fp.write(''.join(self.header))
858 fp.write(''.join(self.header))
861
859
862 def allhunks(self):
860 def allhunks(self):
863 return any(self.allhunks_re.match(h) for h in self.header)
861 return any(self.allhunks_re.match(h) for h in self.header)
864
862
865 def files(self):
863 def files(self):
866 match = self.diffgit_re.match(self.header[0])
864 match = self.diffgit_re.match(self.header[0])
867 if match:
865 if match:
868 fromfile, tofile = match.groups()
866 fromfile, tofile = match.groups()
869 if fromfile == tofile:
867 if fromfile == tofile:
870 return [fromfile]
868 return [fromfile]
871 return [fromfile, tofile]
869 return [fromfile, tofile]
872 else:
870 else:
873 return self.diff_re.match(self.header[0]).groups()
871 return self.diff_re.match(self.header[0]).groups()
874
872
875 def filename(self):
873 def filename(self):
876 return self.files()[-1]
874 return self.files()[-1]
877
875
878 def __repr__(self):
876 def __repr__(self):
879 return '<header %s>' % (' '.join(map(repr, self.files())))
877 return '<header %s>' % (' '.join(map(repr, self.files())))
880
878
881 def isnewfile(self):
879 def isnewfile(self):
882 return any(self.newfile_re.match(h) for h in self.header)
880 return any(self.newfile_re.match(h) for h in self.header)
883
881
884 def special(self):
882 def special(self):
885 # Special files are shown only at the header level and not at the hunk
883 # Special files are shown only at the header level and not at the hunk
886 # level for example a file that has been deleted is a special file.
884 # level for example a file that has been deleted is a special file.
887 # The user cannot change the content of the operation, in the case of
885 # The user cannot change the content of the operation, in the case of
888 # the deleted file he has to take the deletion or not take it, he
886 # the deleted file he has to take the deletion or not take it, he
889 # cannot take some of it.
887 # cannot take some of it.
890 # Newly added files are special if they are empty, they are not special
888 # Newly added files are special if they are empty, they are not special
891 # if they have some content as we want to be able to change it
889 # if they have some content as we want to be able to change it
892 nocontent = len(self.header) == 2
890 nocontent = len(self.header) == 2
893 emptynewfile = self.isnewfile() and nocontent
891 emptynewfile = self.isnewfile() and nocontent
894 return emptynewfile or \
892 return emptynewfile or \
895 any(self.special_re.match(h) for h in self.header)
893 any(self.special_re.match(h) for h in self.header)
896
894
897 class recordhunk(object):
895 class recordhunk(object):
898 """patch hunk
896 """patch hunk
899
897
900 XXX shouldn't we merge this with the other hunk class?
898 XXX shouldn't we merge this with the other hunk class?
901 """
899 """
902 maxcontext = 3
900 maxcontext = 3
903
901
904 def __init__(self, header, fromline, toline, proc, before, hunk, after):
902 def __init__(self, header, fromline, toline, proc, before, hunk, after):
905 def trimcontext(number, lines):
903 def trimcontext(number, lines):
906 delta = len(lines) - self.maxcontext
904 delta = len(lines) - self.maxcontext
907 if False and delta > 0:
905 if False and delta > 0:
908 return number + delta, lines[:self.maxcontext]
906 return number + delta, lines[:self.maxcontext]
909 return number, lines
907 return number, lines
910
908
911 self.header = header
909 self.header = header
912 self.fromline, self.before = trimcontext(fromline, before)
910 self.fromline, self.before = trimcontext(fromline, before)
913 self.toline, self.after = trimcontext(toline, after)
911 self.toline, self.after = trimcontext(toline, after)
914 self.proc = proc
912 self.proc = proc
915 self.hunk = hunk
913 self.hunk = hunk
916 self.added, self.removed = self.countchanges(self.hunk)
914 self.added, self.removed = self.countchanges(self.hunk)
917
915
918 def __eq__(self, v):
916 def __eq__(self, v):
919 if not isinstance(v, recordhunk):
917 if not isinstance(v, recordhunk):
920 return False
918 return False
921
919
922 return ((v.hunk == self.hunk) and
920 return ((v.hunk == self.hunk) and
923 (v.proc == self.proc) and
921 (v.proc == self.proc) and
924 (self.fromline == v.fromline) and
922 (self.fromline == v.fromline) and
925 (self.header.files() == v.header.files()))
923 (self.header.files() == v.header.files()))
926
924
927 def __hash__(self):
925 def __hash__(self):
928 return hash((tuple(self.hunk),
926 return hash((tuple(self.hunk),
929 tuple(self.header.files()),
927 tuple(self.header.files()),
930 self.fromline,
928 self.fromline,
931 self.proc))
929 self.proc))
932
930
933 def countchanges(self, hunk):
931 def countchanges(self, hunk):
934 """hunk -> (n+,n-)"""
932 """hunk -> (n+,n-)"""
935 add = len([h for h in hunk if h[0] == '+'])
933 add = len([h for h in hunk if h[0] == '+'])
936 rem = len([h for h in hunk if h[0] == '-'])
934 rem = len([h for h in hunk if h[0] == '-'])
937 return add, rem
935 return add, rem
938
936
939 def write(self, fp):
937 def write(self, fp):
940 delta = len(self.before) + len(self.after)
938 delta = len(self.before) + len(self.after)
941 if self.after and self.after[-1] == '\\ No newline at end of file\n':
939 if self.after and self.after[-1] == '\\ No newline at end of file\n':
942 delta -= 1
940 delta -= 1
943 fromlen = delta + self.removed
941 fromlen = delta + self.removed
944 tolen = delta + self.added
942 tolen = delta + self.added
945 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
943 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
946 (self.fromline, fromlen, self.toline, tolen,
944 (self.fromline, fromlen, self.toline, tolen,
947 self.proc and (' ' + self.proc)))
945 self.proc and (' ' + self.proc)))
948 fp.write(''.join(self.before + self.hunk + self.after))
946 fp.write(''.join(self.before + self.hunk + self.after))
949
947
950 pretty = write
948 pretty = write
951
949
952 def filename(self):
950 def filename(self):
953 return self.header.filename()
951 return self.header.filename()
954
952
955 def __repr__(self):
953 def __repr__(self):
956 return '<hunk %r@%d>' % (self.filename(), self.fromline)
954 return '<hunk %r@%d>' % (self.filename(), self.fromline)
957
955
958 def filterpatch(ui, headers, operation=None):
956 def filterpatch(ui, headers, operation=None):
959 """Interactively filter patch chunks into applied-only chunks"""
957 """Interactively filter patch chunks into applied-only chunks"""
960 if operation is None:
958 if operation is None:
961 operation = _('record')
959 operation = _('record')
962
960
963 def prompt(skipfile, skipall, query, chunk):
961 def prompt(skipfile, skipall, query, chunk):
964 """prompt query, and process base inputs
962 """prompt query, and process base inputs
965
963
966 - y/n for the rest of file
964 - y/n for the rest of file
967 - y/n for the rest
965 - y/n for the rest
968 - ? (help)
966 - ? (help)
969 - q (quit)
967 - q (quit)
970
968
971 Return True/False and possibly updated skipfile and skipall.
969 Return True/False and possibly updated skipfile and skipall.
972 """
970 """
973 newpatches = None
971 newpatches = None
974 if skipall is not None:
972 if skipall is not None:
975 return skipall, skipfile, skipall, newpatches
973 return skipall, skipfile, skipall, newpatches
976 if skipfile is not None:
974 if skipfile is not None:
977 return skipfile, skipfile, skipall, newpatches
975 return skipfile, skipfile, skipall, newpatches
978 while True:
976 while True:
979 resps = _('[Ynesfdaq?]'
977 resps = _('[Ynesfdaq?]'
980 '$$ &Yes, record this change'
978 '$$ &Yes, record this change'
981 '$$ &No, skip this change'
979 '$$ &No, skip this change'
982 '$$ &Edit this change manually'
980 '$$ &Edit this change manually'
983 '$$ &Skip remaining changes to this file'
981 '$$ &Skip remaining changes to this file'
984 '$$ Record remaining changes to this &file'
982 '$$ Record remaining changes to this &file'
985 '$$ &Done, skip remaining changes and files'
983 '$$ &Done, skip remaining changes and files'
986 '$$ Record &all changes to all remaining files'
984 '$$ Record &all changes to all remaining files'
987 '$$ &Quit, recording no changes'
985 '$$ &Quit, recording no changes'
988 '$$ &? (display help)')
986 '$$ &? (display help)')
989 r = ui.promptchoice("%s %s" % (query, resps))
987 r = ui.promptchoice("%s %s" % (query, resps))
990 ui.write("\n")
988 ui.write("\n")
991 if r == 8: # ?
989 if r == 8: # ?
992 for c, t in ui.extractchoices(resps)[1]:
990 for c, t in ui.extractchoices(resps)[1]:
993 ui.write('%s - %s\n' % (c, t.lower()))
991 ui.write('%s - %s\n' % (c, t.lower()))
994 continue
992 continue
995 elif r == 0: # yes
993 elif r == 0: # yes
996 ret = True
994 ret = True
997 elif r == 1: # no
995 elif r == 1: # no
998 ret = False
996 ret = False
999 elif r == 2: # Edit patch
997 elif r == 2: # Edit patch
1000 if chunk is None:
998 if chunk is None:
1001 ui.write(_('cannot edit patch for whole file'))
999 ui.write(_('cannot edit patch for whole file'))
1002 ui.write("\n")
1000 ui.write("\n")
1003 continue
1001 continue
1004 if chunk.header.binary():
1002 if chunk.header.binary():
1005 ui.write(_('cannot edit patch for binary file'))
1003 ui.write(_('cannot edit patch for binary file'))
1006 ui.write("\n")
1004 ui.write("\n")
1007 continue
1005 continue
1008 # Patch comment based on the Git one (based on comment at end of
1006 # Patch comment based on the Git one (based on comment at end of
1009 # https://mercurial-scm.org/wiki/RecordExtension)
1007 # https://mercurial-scm.org/wiki/RecordExtension)
1010 phelp = '---' + _("""
1008 phelp = '---' + _("""
1011 To remove '-' lines, make them ' ' lines (context).
1009 To remove '-' lines, make them ' ' lines (context).
1012 To remove '+' lines, delete them.
1010 To remove '+' lines, delete them.
1013 Lines starting with # will be removed from the patch.
1011 Lines starting with # will be removed from the patch.
1014
1012
1015 If the patch applies cleanly, the edited hunk will immediately be
1013 If the patch applies cleanly, the edited hunk will immediately be
1016 added to the record list. If it does not apply cleanly, a rejects
1014 added to the record list. If it does not apply cleanly, a rejects
1017 file will be generated: you can use that when you try again. If
1015 file will be generated: you can use that when you try again. If
1018 all lines of the hunk are removed, then the edit is aborted and
1016 all lines of the hunk are removed, then the edit is aborted and
1019 the hunk is left unchanged.
1017 the hunk is left unchanged.
1020 """)
1018 """)
1021 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1019 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1022 suffix=".diff", text=True)
1020 suffix=".diff", text=True)
1023 ncpatchfp = None
1021 ncpatchfp = None
1024 try:
1022 try:
1025 # Write the initial patch
1023 # Write the initial patch
1026 f = os.fdopen(patchfd, "w")
1024 f = os.fdopen(patchfd, "w")
1027 chunk.header.write(f)
1025 chunk.header.write(f)
1028 chunk.write(f)
1026 chunk.write(f)
1029 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1027 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1030 f.close()
1028 f.close()
1031 # Start the editor and wait for it to complete
1029 # Start the editor and wait for it to complete
1032 editor = ui.geteditor()
1030 editor = ui.geteditor()
1033 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1031 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1034 environ={'HGUSER': ui.username()})
1032 environ={'HGUSER': ui.username()})
1035 if ret != 0:
1033 if ret != 0:
1036 ui.warn(_("editor exited with exit code %d\n") % ret)
1034 ui.warn(_("editor exited with exit code %d\n") % ret)
1037 continue
1035 continue
1038 # Remove comment lines
1036 # Remove comment lines
1039 patchfp = open(patchfn)
1037 patchfp = open(patchfn)
1040 ncpatchfp = cStringIO.StringIO()
1038 ncpatchfp = cStringIO.StringIO()
1041 for line in patchfp:
1039 for line in patchfp:
1042 if not line.startswith('#'):
1040 if not line.startswith('#'):
1043 ncpatchfp.write(line)
1041 ncpatchfp.write(line)
1044 patchfp.close()
1042 patchfp.close()
1045 ncpatchfp.seek(0)
1043 ncpatchfp.seek(0)
1046 newpatches = parsepatch(ncpatchfp)
1044 newpatches = parsepatch(ncpatchfp)
1047 finally:
1045 finally:
1048 os.unlink(patchfn)
1046 os.unlink(patchfn)
1049 del ncpatchfp
1047 del ncpatchfp
1050 # Signal that the chunk shouldn't be applied as-is, but
1048 # Signal that the chunk shouldn't be applied as-is, but
1051 # provide the new patch to be used instead.
1049 # provide the new patch to be used instead.
1052 ret = False
1050 ret = False
1053 elif r == 3: # Skip
1051 elif r == 3: # Skip
1054 ret = skipfile = False
1052 ret = skipfile = False
1055 elif r == 4: # file (Record remaining)
1053 elif r == 4: # file (Record remaining)
1056 ret = skipfile = True
1054 ret = skipfile = True
1057 elif r == 5: # done, skip remaining
1055 elif r == 5: # done, skip remaining
1058 ret = skipall = False
1056 ret = skipall = False
1059 elif r == 6: # all
1057 elif r == 6: # all
1060 ret = skipall = True
1058 ret = skipall = True
1061 elif r == 7: # quit
1059 elif r == 7: # quit
1062 raise util.Abort(_('user quit'))
1060 raise util.Abort(_('user quit'))
1063 return ret, skipfile, skipall, newpatches
1061 return ret, skipfile, skipall, newpatches
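# Editor's note (added comment): ui.promptchoice() returns the index of the
# chosen answer within 'resps' above, which is why the branches map 0..8 to
# yes, no, edit, skip-file, record-file, done, record-all, quit and help.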
1064
1062
1065 seen = set()
1063 seen = set()
1066 applied = {} # 'filename' -> [] of chunks
1064 applied = {} # 'filename' -> [] of chunks
1067 skipfile, skipall = None, None
1065 skipfile, skipall = None, None
1068 pos, total = 1, sum(len(h.hunks) for h in headers)
1066 pos, total = 1, sum(len(h.hunks) for h in headers)
1069 for h in headers:
1067 for h in headers:
1070 pos += len(h.hunks)
1068 pos += len(h.hunks)
1071 skipfile = None
1069 skipfile = None
1072 fixoffset = 0
1070 fixoffset = 0
1073 hdr = ''.join(h.header)
1071 hdr = ''.join(h.header)
1074 if hdr in seen:
1072 if hdr in seen:
1075 continue
1073 continue
1076 seen.add(hdr)
1074 seen.add(hdr)
1077 if skipall is None:
1075 if skipall is None:
1078 h.pretty(ui)
1076 h.pretty(ui)
1079 msg = (_('examine changes to %s?') %
1077 msg = (_('examine changes to %s?') %
1080 _(' and ').join("'%s'" % f for f in h.files()))
1078 _(' and ').join("'%s'" % f for f in h.files()))
1081 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1079 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1082 if not r:
1080 if not r:
1083 continue
1081 continue
1084 applied[h.filename()] = [h]
1082 applied[h.filename()] = [h]
1085 if h.allhunks():
1083 if h.allhunks():
1086 applied[h.filename()] += h.hunks
1084 applied[h.filename()] += h.hunks
1087 continue
1085 continue
1088 for i, chunk in enumerate(h.hunks):
1086 for i, chunk in enumerate(h.hunks):
1089 if skipfile is None and skipall is None:
1087 if skipfile is None and skipall is None:
1090 chunk.pretty(ui)
1088 chunk.pretty(ui)
1091 if total == 1:
1089 if total == 1:
1092 msg = _("record this change to '%s'?") % chunk.filename()
1090 msg = _("record this change to '%s'?") % chunk.filename()
1093 else:
1091 else:
1094 idx = pos - len(h.hunks) + i
1092 idx = pos - len(h.hunks) + i
1095 msg = _("record change %d/%d to '%s'?") % (idx, total,
1093 msg = _("record change %d/%d to '%s'?") % (idx, total,
1096 chunk.filename())
1094 chunk.filename())
1097 r, skipfile, skipall, newpatches = prompt(skipfile,
1095 r, skipfile, skipall, newpatches = prompt(skipfile,
1098 skipall, msg, chunk)
1096 skipall, msg, chunk)
1099 if r:
1097 if r:
1100 if fixoffset:
1098 if fixoffset:
1101 chunk = copy.copy(chunk)
1099 chunk = copy.copy(chunk)
1102 chunk.toline += fixoffset
1100 chunk.toline += fixoffset
1103 applied[chunk.filename()].append(chunk)
1101 applied[chunk.filename()].append(chunk)
1104 elif newpatches is not None:
1102 elif newpatches is not None:
1105 for newpatch in newpatches:
1103 for newpatch in newpatches:
1106 for newhunk in newpatch.hunks:
1104 for newhunk in newpatch.hunks:
1107 if fixoffset:
1105 if fixoffset:
1108 newhunk.toline += fixoffset
1106 newhunk.toline += fixoffset
1109 applied[newhunk.filename()].append(newhunk)
1107 applied[newhunk.filename()].append(newhunk)
1110 else:
1108 else:
1111 fixoffset += chunk.removed - chunk.added
1109 fixoffset += chunk.removed - chunk.added
1112 return sum([h for h in applied.itervalues()
1110 return sum([h for h in applied.itervalues()
1113 if h[0].special() or len(h) > 1], [])
1111 if h[0].special() or len(h) > 1], [])
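# Editor's note: filterpatch() returns a flat list interleaving each selected
# header with its selected hunks.  The sketch below is not part of the original
# module (the helper name is made up); it shows one way a caller could group
# that list back into per-file buckets, assuming plain 'header' instances.
def _groupbyheader(chunks):
    groups = []
    for c in chunks:
        if isinstance(c, header):
            # a header starts a new per-file entry
            groups.append((c, []))
        else:
            # anything else is a hunk belonging to the last header seen
            groups[-1][1].append(c)
    return groups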
1114 class hunk(object):
1112 class hunk(object):
1115 def __init__(self, desc, num, lr, context):
1113 def __init__(self, desc, num, lr, context):
1116 self.number = num
1114 self.number = num
1117 self.desc = desc
1115 self.desc = desc
1118 self.hunk = [desc]
1116 self.hunk = [desc]
1119 self.a = []
1117 self.a = []
1120 self.b = []
1118 self.b = []
1121 self.starta = self.lena = None
1119 self.starta = self.lena = None
1122 self.startb = self.lenb = None
1120 self.startb = self.lenb = None
1123 if lr is not None:
1121 if lr is not None:
1124 if context:
1122 if context:
1125 self.read_context_hunk(lr)
1123 self.read_context_hunk(lr)
1126 else:
1124 else:
1127 self.read_unified_hunk(lr)
1125 self.read_unified_hunk(lr)
1128
1126
1129 def getnormalized(self):
1127 def getnormalized(self):
1130 """Return a copy with line endings normalized to LF."""
1128 """Return a copy with line endings normalized to LF."""
1131
1129
1132 def normalize(lines):
1130 def normalize(lines):
1133 nlines = []
1131 nlines = []
1134 for line in lines:
1132 for line in lines:
1135 if line.endswith('\r\n'):
1133 if line.endswith('\r\n'):
1136 line = line[:-2] + '\n'
1134 line = line[:-2] + '\n'
1137 nlines.append(line)
1135 nlines.append(line)
1138 return nlines
1136 return nlines
1139
1137
1140 # Dummy object, it is rebuilt manually
1138 # Dummy object, it is rebuilt manually
1141 nh = hunk(self.desc, self.number, None, None)
1139 nh = hunk(self.desc, self.number, None, None)
1142 nh.number = self.number
1140 nh.number = self.number
1143 nh.desc = self.desc
1141 nh.desc = self.desc
1144 nh.hunk = self.hunk
1142 nh.hunk = self.hunk
1145 nh.a = normalize(self.a)
1143 nh.a = normalize(self.a)
1146 nh.b = normalize(self.b)
1144 nh.b = normalize(self.b)
1147 nh.starta = self.starta
1145 nh.starta = self.starta
1148 nh.startb = self.startb
1146 nh.startb = self.startb
1149 nh.lena = self.lena
1147 nh.lena = self.lena
1150 nh.lenb = self.lenb
1148 nh.lenb = self.lenb
1151 return nh
1149 return nh
1152
1150
1153 def read_unified_hunk(self, lr):
1151 def read_unified_hunk(self, lr):
1154 m = unidesc.match(self.desc)
1152 m = unidesc.match(self.desc)
1155 if not m:
1153 if not m:
1156 raise PatchError(_("bad hunk #%d") % self.number)
1154 raise PatchError(_("bad hunk #%d") % self.number)
1157 self.starta, self.lena, self.startb, self.lenb = m.groups()
1155 self.starta, self.lena, self.startb, self.lenb = m.groups()
1158 if self.lena is None:
1156 if self.lena is None:
1159 self.lena = 1
1157 self.lena = 1
1160 else:
1158 else:
1161 self.lena = int(self.lena)
1159 self.lena = int(self.lena)
1162 if self.lenb is None:
1160 if self.lenb is None:
1163 self.lenb = 1
1161 self.lenb = 1
1164 else:
1162 else:
1165 self.lenb = int(self.lenb)
1163 self.lenb = int(self.lenb)
1166 self.starta = int(self.starta)
1164 self.starta = int(self.starta)
1167 self.startb = int(self.startb)
1165 self.startb = int(self.startb)
1168 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1166 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1169 self.b)
1167 self.b)
1170 # if we hit eof before finishing out the hunk, the last line will
1168 # if we hit eof before finishing out the hunk, the last line will
1171 # be zero length. Let's try to fix it up.
1169 # be zero length. Let's try to fix it up.
1172 while len(self.hunk[-1]) == 0:
1170 while len(self.hunk[-1]) == 0:
1173 del self.hunk[-1]
1171 del self.hunk[-1]
1174 del self.a[-1]
1172 del self.a[-1]
1175 del self.b[-1]
1173 del self.b[-1]
1176 self.lena -= 1
1174 self.lena -= 1
1177 self.lenb -= 1
1175 self.lenb -= 1
1178 self._fixnewline(lr)
1176 self._fixnewline(lr)
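# Editor's note (added comment): for a range such as "@@ -10,3 +12,4 @@" this
# yields starta=10, lena=3, startb=12, lenb=4; when a length is omitted from
# the range, the None checks above default it to 1.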
1179
1177
1180 def read_context_hunk(self, lr):
1178 def read_context_hunk(self, lr):
1181 self.desc = lr.readline()
1179 self.desc = lr.readline()
1182 m = contextdesc.match(self.desc)
1180 m = contextdesc.match(self.desc)
1183 if not m:
1181 if not m:
1184 raise PatchError(_("bad hunk #%d") % self.number)
1182 raise PatchError(_("bad hunk #%d") % self.number)
1185 self.starta, aend = m.groups()
1183 self.starta, aend = m.groups()
1186 self.starta = int(self.starta)
1184 self.starta = int(self.starta)
1187 if aend is None:
1185 if aend is None:
1188 aend = self.starta
1186 aend = self.starta
1189 self.lena = int(aend) - self.starta
1187 self.lena = int(aend) - self.starta
1190 if self.starta:
1188 if self.starta:
1191 self.lena += 1
1189 self.lena += 1
1192 for x in xrange(self.lena):
1190 for x in xrange(self.lena):
1193 l = lr.readline()
1191 l = lr.readline()
1194 if l.startswith('---'):
1192 if l.startswith('---'):
1196 # line additions, old block is empty
1194 # line additions, old block is empty
1196 lr.push(l)
1194 lr.push(l)
1197 break
1195 break
1198 s = l[2:]
1196 s = l[2:]
1199 if l.startswith('- ') or l.startswith('! '):
1197 if l.startswith('- ') or l.startswith('! '):
1200 u = '-' + s
1198 u = '-' + s
1201 elif l.startswith(' '):
1199 elif l.startswith(' '):
1202 u = ' ' + s
1200 u = ' ' + s
1203 else:
1201 else:
1204 raise PatchError(_("bad hunk #%d old text line %d") %
1202 raise PatchError(_("bad hunk #%d old text line %d") %
1205 (self.number, x))
1203 (self.number, x))
1206 self.a.append(u)
1204 self.a.append(u)
1207 self.hunk.append(u)
1205 self.hunk.append(u)
1208
1206
1209 l = lr.readline()
1207 l = lr.readline()
1210 if l.startswith('\ '):
1208 if l.startswith('\ '):
1211 s = self.a[-1][:-1]
1209 s = self.a[-1][:-1]
1212 self.a[-1] = s
1210 self.a[-1] = s
1213 self.hunk[-1] = s
1211 self.hunk[-1] = s
1214 l = lr.readline()
1212 l = lr.readline()
1215 m = contextdesc.match(l)
1213 m = contextdesc.match(l)
1216 if not m:
1214 if not m:
1217 raise PatchError(_("bad hunk #%d") % self.number)
1215 raise PatchError(_("bad hunk #%d") % self.number)
1218 self.startb, bend = m.groups()
1216 self.startb, bend = m.groups()
1219 self.startb = int(self.startb)
1217 self.startb = int(self.startb)
1220 if bend is None:
1218 if bend is None:
1221 bend = self.startb
1219 bend = self.startb
1222 self.lenb = int(bend) - self.startb
1220 self.lenb = int(bend) - self.startb
1223 if self.startb:
1221 if self.startb:
1224 self.lenb += 1
1222 self.lenb += 1
1225 hunki = 1
1223 hunki = 1
1226 for x in xrange(self.lenb):
1224 for x in xrange(self.lenb):
1227 l = lr.readline()
1225 l = lr.readline()
1228 if l.startswith('\ '):
1226 if l.startswith('\ '):
1229 # XXX: the only way to hit this is with an invalid line range.
1227 # XXX: the only way to hit this is with an invalid line range.
1230 # The no-eol marker is not counted in the line range, but I
1228 # The no-eol marker is not counted in the line range, but I
1231 # guess there are diff(1) implementations out there which behave differently.
1229 # guess there are diff(1) implementations out there which behave differently.
1232 s = self.b[-1][:-1]
1230 s = self.b[-1][:-1]
1233 self.b[-1] = s
1231 self.b[-1] = s
1234 self.hunk[hunki - 1] = s
1232 self.hunk[hunki - 1] = s
1235 continue
1233 continue
1236 if not l:
1234 if not l:
1237 # line deletions, new block is empty and we hit EOF
1235 # line deletions, new block is empty and we hit EOF
1238 lr.push(l)
1236 lr.push(l)
1239 break
1237 break
1240 s = l[2:]
1238 s = l[2:]
1241 if l.startswith('+ ') or l.startswith('! '):
1239 if l.startswith('+ ') or l.startswith('! '):
1242 u = '+' + s
1240 u = '+' + s
1243 elif l.startswith(' '):
1241 elif l.startswith(' '):
1244 u = ' ' + s
1242 u = ' ' + s
1245 elif len(self.b) == 0:
1243 elif len(self.b) == 0:
1246 # line deletions, new block is empty
1244 # line deletions, new block is empty
1247 lr.push(l)
1245 lr.push(l)
1248 break
1246 break
1249 else:
1247 else:
1250 raise PatchError(_("bad hunk #%d new text line %d") %
1248 raise PatchError(_("bad hunk #%d new text line %d") %
1251 (self.number, x))
1249 (self.number, x))
1252 self.b.append(s)
1250 self.b.append(s)
1253 while True:
1251 while True:
1254 if hunki >= len(self.hunk):
1252 if hunki >= len(self.hunk):
1255 h = ""
1253 h = ""
1256 else:
1254 else:
1257 h = self.hunk[hunki]
1255 h = self.hunk[hunki]
1258 hunki += 1
1256 hunki += 1
1259 if h == u:
1257 if h == u:
1260 break
1258 break
1261 elif h.startswith('-'):
1259 elif h.startswith('-'):
1262 continue
1260 continue
1263 else:
1261 else:
1264 self.hunk.insert(hunki - 1, u)
1262 self.hunk.insert(hunki - 1, u)
1265 break
1263 break
1266
1264
1267 if not self.a:
1265 if not self.a:
1268 # this happens when lines were only added to the hunk
1266 # this happens when lines were only added to the hunk
1269 for x in self.hunk:
1267 for x in self.hunk:
1270 if x.startswith('-') or x.startswith(' '):
1268 if x.startswith('-') or x.startswith(' '):
1271 self.a.append(x)
1269 self.a.append(x)
1272 if not self.b:
1270 if not self.b:
1273 # this happens when lines were only deleted from the hunk
1271 # this happens when lines were only deleted from the hunk
1274 for x in self.hunk:
1272 for x in self.hunk:
1275 if x.startswith('+') or x.startswith(' '):
1273 if x.startswith('+') or x.startswith(' '):
1276 self.b.append(x[1:])
1274 self.b.append(x[1:])
1277 # @@ -start,len +start,len @@
1275 # @@ -start,len +start,len @@
1278 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1276 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1279 self.startb, self.lenb)
1277 self.startb, self.lenb)
1280 self.hunk[0] = self.desc
1278 self.hunk[0] = self.desc
1281 self._fixnewline(lr)
1279 self._fixnewline(lr)
1282
1280
1283 def _fixnewline(self, lr):
1281 def _fixnewline(self, lr):
1284 l = lr.readline()
1282 l = lr.readline()
1285 if l.startswith('\ '):
1283 if l.startswith('\ '):
1286 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1284 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1287 else:
1285 else:
1288 lr.push(l)
1286 lr.push(l)
1289
1287
1290 def complete(self):
1288 def complete(self):
1291 return len(self.a) == self.lena and len(self.b) == self.lenb
1289 return len(self.a) == self.lena and len(self.b) == self.lenb
1292
1290
1293 def _fuzzit(self, old, new, fuzz, toponly):
1291 def _fuzzit(self, old, new, fuzz, toponly):
1294 # this removes context lines from the top and bottom of 'old' and 'new'. It
1292 # this removes context lines from the top and bottom of 'old' and 'new'. It
1295 # checks the hunk to make sure only context lines are removed, and then
1293 # checks the hunk to make sure only context lines are removed, and then
1296 # returns a new shortened list of lines.
1294 # returns a new shortened list of lines.
1297 fuzz = min(fuzz, len(old))
1295 fuzz = min(fuzz, len(old))
1298 if fuzz:
1296 if fuzz:
1299 top = 0
1297 top = 0
1300 bot = 0
1298 bot = 0
1301 hlen = len(self.hunk)
1299 hlen = len(self.hunk)
1302 for x in xrange(hlen - 1):
1300 for x in xrange(hlen - 1):
1303 # the hunk starts with the @@ line, so use x+1
1301 # the hunk starts with the @@ line, so use x+1
1304 if self.hunk[x + 1][0] == ' ':
1302 if self.hunk[x + 1][0] == ' ':
1305 top += 1
1303 top += 1
1306 else:
1304 else:
1307 break
1305 break
1308 if not toponly:
1306 if not toponly:
1309 for x in xrange(hlen - 1):
1307 for x in xrange(hlen - 1):
1310 if self.hunk[hlen - bot - 1][0] == ' ':
1308 if self.hunk[hlen - bot - 1][0] == ' ':
1311 bot += 1
1309 bot += 1
1312 else:
1310 else:
1313 break
1311 break
1314
1312
1315 bot = min(fuzz, bot)
1313 bot = min(fuzz, bot)
1316 top = min(fuzz, top)
1314 top = min(fuzz, top)
1317 return old[top:len(old) - bot], new[top:len(new) - bot], top
1315 return old[top:len(old) - bot], new[top:len(new) - bot], top
1318 return old, new, 0
1316 return old, new, 0
1319
1317
1320 def fuzzit(self, fuzz, toponly):
1318 def fuzzit(self, fuzz, toponly):
1321 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1319 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1322 oldstart = self.starta + top
1320 oldstart = self.starta + top
1323 newstart = self.startb + top
1321 newstart = self.startb + top
1324 # zero length hunk ranges already have their start decremented
1322 # zero length hunk ranges already have their start decremented
1325 if self.lena and oldstart > 0:
1323 if self.lena and oldstart > 0:
1326 oldstart -= 1
1324 oldstart -= 1
1327 if self.lenb and newstart > 0:
1325 if self.lenb and newstart > 0:
1328 newstart -= 1
1326 newstart -= 1
1329 return old, oldstart, new, newstart
1327 return old, oldstart, new, newstart
1330
1328
1331 class binhunk(object):
1329 class binhunk(object):
1332 'A binary patch file.'
1330 'A binary patch file.'
1333 def __init__(self, lr, fname):
1331 def __init__(self, lr, fname):
1334 self.text = None
1332 self.text = None
1335 self.delta = False
1333 self.delta = False
1336 self.hunk = ['GIT binary patch\n']
1334 self.hunk = ['GIT binary patch\n']
1337 self._fname = fname
1335 self._fname = fname
1338 self._read(lr)
1336 self._read(lr)
1339
1337
1340 def complete(self):
1338 def complete(self):
1341 return self.text is not None
1339 return self.text is not None
1342
1340
1343 def new(self, lines):
1341 def new(self, lines):
1344 if self.delta:
1342 if self.delta:
1345 return [applybindelta(self.text, ''.join(lines))]
1343 return [applybindelta(self.text, ''.join(lines))]
1346 return [self.text]
1344 return [self.text]
1347
1345
1348 def _read(self, lr):
1346 def _read(self, lr):
1349 def getline(lr, hunk):
1347 def getline(lr, hunk):
1350 l = lr.readline()
1348 l = lr.readline()
1351 hunk.append(l)
1349 hunk.append(l)
1352 return l.rstrip('\r\n')
1350 return l.rstrip('\r\n')
1353
1351
1354 size = 0
1352 size = 0
1355 while True:
1353 while True:
1356 line = getline(lr, self.hunk)
1354 line = getline(lr, self.hunk)
1357 if not line:
1355 if not line:
1358 raise PatchError(_('could not extract "%s" binary data')
1356 raise PatchError(_('could not extract "%s" binary data')
1359 % self._fname)
1357 % self._fname)
1360 if line.startswith('literal '):
1358 if line.startswith('literal '):
1361 size = int(line[8:].rstrip())
1359 size = int(line[8:].rstrip())
1362 break
1360 break
1363 if line.startswith('delta '):
1361 if line.startswith('delta '):
1364 size = int(line[6:].rstrip())
1362 size = int(line[6:].rstrip())
1365 self.delta = True
1363 self.delta = True
1366 break
1364 break
1367 dec = []
1365 dec = []
1368 line = getline(lr, self.hunk)
1366 line = getline(lr, self.hunk)
1369 while len(line) > 1:
1367 while len(line) > 1:
1370 l = line[0]
1368 l = line[0]
1371 if l <= 'Z' and l >= 'A':
1369 if l <= 'Z' and l >= 'A':
1372 l = ord(l) - ord('A') + 1
1370 l = ord(l) - ord('A') + 1
1373 else:
1371 else:
1374 l = ord(l) - ord('a') + 27
1372 l = ord(l) - ord('a') + 27
1375 try:
1373 try:
1376 dec.append(base85.b85decode(line[1:])[:l])
1374 dec.append(base85.b85decode(line[1:])[:l])
1377 except ValueError as e:
1375 except ValueError as e:
1378 raise PatchError(_('could not decode "%s" binary patch: %s')
1376 raise PatchError(_('could not decode "%s" binary patch: %s')
1379 % (self._fname, str(e)))
1377 % (self._fname, str(e)))
1380 line = getline(lr, self.hunk)
1378 line = getline(lr, self.hunk)
1381 text = zlib.decompress(''.join(dec))
1379 text = zlib.decompress(''.join(dec))
1382 if len(text) != size:
1380 if len(text) != size:
1383 raise PatchError(_('"%s" length is %d bytes, should be %d')
1381 raise PatchError(_('"%s" length is %d bytes, should be %d')
1384 % (self._fname, len(text), size))
1382 % (self._fname, len(text), size))
1385 self.text = text
1383 self.text = text
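# Editor's note: in the "GIT binary patch" format consumed by binhunk._read
# above, the first character of each data line encodes how many bytes the line
# contributes once base85-decoded: 'A'-'Z' mean 1-26 and 'a'-'z' mean 27-52.
# A standalone restatement of that mapping (illustrative only):
def _gitbinlinelen(c):
    if 'A' <= c <= 'Z':
        return ord(c) - ord('A') + 1
    return ord(c) - ord('a') + 27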
1386
1384
1387 def parsefilename(str):
1385 def parsefilename(str):
1388 # --- filename \t|space stuff
1386 # --- filename \t|space stuff
1389 s = str[4:].rstrip('\r\n')
1387 s = str[4:].rstrip('\r\n')
1390 i = s.find('\t')
1388 i = s.find('\t')
1391 if i < 0:
1389 if i < 0:
1392 i = s.find(' ')
1390 i = s.find(' ')
1393 if i < 0:
1391 if i < 0:
1394 return s
1392 return s
1395 return s[:i]
1393 return s[:i]
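# Editor's note: a couple of illustrative calls (file names made up) showing
# what parsefilename() keeps from "---"/"+++" lines -- anything after the
# first tab (or, failing that, the first space) following the name, such as a
# timestamp, is dropped.
def _parsefilename_examples():
    assert parsefilename('--- a/foo.c\t2006-01-01 00:00:00\n') == 'a/foo.c'
    assert parsefilename('+++ b/foo.c\n') == 'b/foo.c'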
1396
1394
1397 def reversehunks(hunks):
1395 def reversehunks(hunks):
1398 '''reverse the signs in the hunks given as argument
1396 '''reverse the signs in the hunks given as argument
1399
1397
1400 This function operates on hunks coming out of patch.filterpatch, that is
1398 This function operates on hunks coming out of patch.filterpatch, that is
1401 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1399 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1402
1400
1403 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1401 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1404 ... --- a/folder1/g
1402 ... --- a/folder1/g
1405 ... +++ b/folder1/g
1403 ... +++ b/folder1/g
1406 ... @@ -1,7 +1,7 @@
1404 ... @@ -1,7 +1,7 @@
1407 ... +firstline
1405 ... +firstline
1408 ... c
1406 ... c
1409 ... 1
1407 ... 1
1410 ... 2
1408 ... 2
1411 ... + 3
1409 ... + 3
1412 ... -4
1410 ... -4
1413 ... 5
1411 ... 5
1414 ... d
1412 ... d
1415 ... +lastline"""
1413 ... +lastline"""
1416 >>> hunks = parsepatch(rawpatch)
1414 >>> hunks = parsepatch(rawpatch)
1417 >>> hunkscomingfromfilterpatch = []
1415 >>> hunkscomingfromfilterpatch = []
1418 >>> for h in hunks:
1416 >>> for h in hunks:
1419 ... hunkscomingfromfilterpatch.append(h)
1417 ... hunkscomingfromfilterpatch.append(h)
1420 ... hunkscomingfromfilterpatch.extend(h.hunks)
1418 ... hunkscomingfromfilterpatch.extend(h.hunks)
1421
1419
1422 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1420 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1423 >>> fp = cStringIO.StringIO()
1421 >>> fp = cStringIO.StringIO()
1424 >>> for c in reversedhunks:
1422 >>> for c in reversedhunks:
1425 ... c.write(fp)
1423 ... c.write(fp)
1426 >>> fp.seek(0)
1424 >>> fp.seek(0)
1427 >>> reversedpatch = fp.read()
1425 >>> reversedpatch = fp.read()
1428 >>> print reversedpatch
1426 >>> print reversedpatch
1429 diff --git a/folder1/g b/folder1/g
1427 diff --git a/folder1/g b/folder1/g
1430 --- a/folder1/g
1428 --- a/folder1/g
1431 +++ b/folder1/g
1429 +++ b/folder1/g
1432 @@ -1,4 +1,3 @@
1430 @@ -1,4 +1,3 @@
1433 -firstline
1431 -firstline
1434 c
1432 c
1435 1
1433 1
1436 2
1434 2
1437 @@ -1,6 +2,6 @@
1435 @@ -1,6 +2,6 @@
1438 c
1436 c
1439 1
1437 1
1440 2
1438 2
1441 - 3
1439 - 3
1442 +4
1440 +4
1443 5
1441 5
1444 d
1442 d
1445 @@ -5,3 +6,2 @@
1443 @@ -5,3 +6,2 @@
1446 5
1444 5
1447 d
1445 d
1448 -lastline
1446 -lastline
1449
1447
1450 '''
1448 '''
1451
1449
1452 import crecord as crecordmod
1450 import crecord as crecordmod
1453 newhunks = []
1451 newhunks = []
1454 for c in hunks:
1452 for c in hunks:
1455 if isinstance(c, crecordmod.uihunk):
1453 if isinstance(c, crecordmod.uihunk):
1456 # curses hunks encapsulate the record hunk in _hunk
1454 # curses hunks encapsulate the record hunk in _hunk
1457 c = c._hunk
1455 c = c._hunk
1458 if isinstance(c, recordhunk):
1456 if isinstance(c, recordhunk):
1459 for j, line in enumerate(c.hunk):
1457 for j, line in enumerate(c.hunk):
1460 if line.startswith("-"):
1458 if line.startswith("-"):
1461 c.hunk[j] = "+" + c.hunk[j][1:]
1459 c.hunk[j] = "+" + c.hunk[j][1:]
1462 elif line.startswith("+"):
1460 elif line.startswith("+"):
1463 c.hunk[j] = "-" + c.hunk[j][1:]
1461 c.hunk[j] = "-" + c.hunk[j][1:]
1464 c.added, c.removed = c.removed, c.added
1462 c.added, c.removed = c.removed, c.added
1465 newhunks.append(c)
1463 newhunks.append(c)
1466 return newhunks
1464 return newhunks
1467
1465
1468 def parsepatch(originalchunks):
1466 def parsepatch(originalchunks):
1469 """patch -> [] of headers -> [] of hunks """
1467 """patch -> [] of headers -> [] of hunks """
1470 class parser(object):
1468 class parser(object):
1471 """patch parsing state machine"""
1469 """patch parsing state machine"""
1472 def __init__(self):
1470 def __init__(self):
1473 self.fromline = 0
1471 self.fromline = 0
1474 self.toline = 0
1472 self.toline = 0
1475 self.proc = ''
1473 self.proc = ''
1476 self.header = None
1474 self.header = None
1477 self.context = []
1475 self.context = []
1478 self.before = []
1476 self.before = []
1479 self.hunk = []
1477 self.hunk = []
1480 self.headers = []
1478 self.headers = []
1481
1479
1482 def addrange(self, limits):
1480 def addrange(self, limits):
1483 fromstart, fromend, tostart, toend, proc = limits
1481 fromstart, fromend, tostart, toend, proc = limits
1484 self.fromline = int(fromstart)
1482 self.fromline = int(fromstart)
1485 self.toline = int(tostart)
1483 self.toline = int(tostart)
1486 self.proc = proc
1484 self.proc = proc
1487
1485
1488 def addcontext(self, context):
1486 def addcontext(self, context):
1489 if self.hunk:
1487 if self.hunk:
1490 h = recordhunk(self.header, self.fromline, self.toline,
1488 h = recordhunk(self.header, self.fromline, self.toline,
1491 self.proc, self.before, self.hunk, context)
1489 self.proc, self.before, self.hunk, context)
1492 self.header.hunks.append(h)
1490 self.header.hunks.append(h)
1493 self.fromline += len(self.before) + h.removed
1491 self.fromline += len(self.before) + h.removed
1494 self.toline += len(self.before) + h.added
1492 self.toline += len(self.before) + h.added
1495 self.before = []
1493 self.before = []
1496 self.hunk = []
1494 self.hunk = []
1497 self.proc = ''
1495 self.proc = ''
1498 self.context = context
1496 self.context = context
1499
1497
1500 def addhunk(self, hunk):
1498 def addhunk(self, hunk):
1501 if self.context:
1499 if self.context:
1502 self.before = self.context
1500 self.before = self.context
1503 self.context = []
1501 self.context = []
1504 self.hunk = hunk
1502 self.hunk = hunk
1505
1503
1506 def newfile(self, hdr):
1504 def newfile(self, hdr):
1507 self.addcontext([])
1505 self.addcontext([])
1508 h = header(hdr)
1506 h = header(hdr)
1509 self.headers.append(h)
1507 self.headers.append(h)
1510 self.header = h
1508 self.header = h
1511
1509
1512 def addother(self, line):
1510 def addother(self, line):
1513 pass # 'other' lines are ignored
1511 pass # 'other' lines are ignored
1514
1512
1515 def finished(self):
1513 def finished(self):
1516 self.addcontext([])
1514 self.addcontext([])
1517 return self.headers
1515 return self.headers
1518
1516
1519 transitions = {
1517 transitions = {
1520 'file': {'context': addcontext,
1518 'file': {'context': addcontext,
1521 'file': newfile,
1519 'file': newfile,
1522 'hunk': addhunk,
1520 'hunk': addhunk,
1523 'range': addrange},
1521 'range': addrange},
1524 'context': {'file': newfile,
1522 'context': {'file': newfile,
1525 'hunk': addhunk,
1523 'hunk': addhunk,
1526 'range': addrange,
1524 'range': addrange,
1527 'other': addother},
1525 'other': addother},
1528 'hunk': {'context': addcontext,
1526 'hunk': {'context': addcontext,
1529 'file': newfile,
1527 'file': newfile,
1530 'range': addrange},
1528 'range': addrange},
1531 'range': {'context': addcontext,
1529 'range': {'context': addcontext,
1532 'hunk': addhunk},
1530 'hunk': addhunk},
1533 'other': {'other': addother},
1531 'other': {'other': addother},
1534 }
1532 }
1535
1533
1536 p = parser()
1534 p = parser()
1537 fp = cStringIO.StringIO()
1535 fp = cStringIO.StringIO()
1538 fp.write(''.join(originalchunks))
1536 fp.write(''.join(originalchunks))
1539 fp.seek(0)
1537 fp.seek(0)
1540
1538
1541 state = 'context'
1539 state = 'context'
1542 for newstate, data in scanpatch(fp):
1540 for newstate, data in scanpatch(fp):
1543 try:
1541 try:
1544 p.transitions[state][newstate](p, data)
1542 p.transitions[state][newstate](p, data)
1545 except KeyError:
1543 except KeyError:
1546 raise PatchError('unhandled transition: %s -> %s' %
1544 raise PatchError('unhandled transition: %s -> %s' %
1547 (state, newstate))
1545 (state, newstate))
1548 state = newstate
1546 state = newstate
1549 del fp
1547 del fp
1550 return p.finished()
1548 return p.finished()
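# Editor's note: a minimal usage sketch (not part of the original module; the
# helper name is made up).  parsepatch() returns header objects whose .hunks
# lists hold recordhunk instances, so per-file added/removed totals can be
# derived like this:
def _countperfile(patchtext):
    counts = {}
    for h in parsepatch(patchtext):
        counts[h.filename()] = (sum(hk.added for hk in h.hunks),
                                sum(hk.removed for hk in h.hunks))
    return counts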
1551
1549
1552 def pathtransform(path, strip, prefix):
1550 def pathtransform(path, strip, prefix):
1553 '''turn a path from a patch into a path suitable for the repository
1551 '''turn a path from a patch into a path suitable for the repository
1554
1552
1555 prefix, if not empty, is expected to be normalized with a / at the end.
1553 prefix, if not empty, is expected to be normalized with a / at the end.
1556
1554
1557 Returns (stripped components, path in repository).
1555 Returns (stripped components, path in repository).
1558
1556
1559 >>> pathtransform('a/b/c', 0, '')
1557 >>> pathtransform('a/b/c', 0, '')
1560 ('', 'a/b/c')
1558 ('', 'a/b/c')
1561 >>> pathtransform(' a/b/c ', 0, '')
1559 >>> pathtransform(' a/b/c ', 0, '')
1562 ('', ' a/b/c')
1560 ('', ' a/b/c')
1563 >>> pathtransform(' a/b/c ', 2, '')
1561 >>> pathtransform(' a/b/c ', 2, '')
1564 ('a/b/', 'c')
1562 ('a/b/', 'c')
1565 >>> pathtransform('a/b/c', 0, 'd/e/')
1563 >>> pathtransform('a/b/c', 0, 'd/e/')
1566 ('', 'd/e/a/b/c')
1564 ('', 'd/e/a/b/c')
1567 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1565 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1568 ('a//b/', 'd/e/c')
1566 ('a//b/', 'd/e/c')
1569 >>> pathtransform('a/b/c', 3, '')
1567 >>> pathtransform('a/b/c', 3, '')
1570 Traceback (most recent call last):
1568 Traceback (most recent call last):
1571 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1569 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1572 '''
1570 '''
1573 pathlen = len(path)
1571 pathlen = len(path)
1574 i = 0
1572 i = 0
1575 if strip == 0:
1573 if strip == 0:
1576 return '', prefix + path.rstrip()
1574 return '', prefix + path.rstrip()
1577 count = strip
1575 count = strip
1578 while count > 0:
1576 while count > 0:
1579 i = path.find('/', i)
1577 i = path.find('/', i)
1580 if i == -1:
1578 if i == -1:
1581 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1579 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1582 (count, strip, path))
1580 (count, strip, path))
1583 i += 1
1581 i += 1
1584 # consume '//' in the path
1582 # consume '//' in the path
1585 while i < pathlen - 1 and path[i] == '/':
1583 while i < pathlen - 1 and path[i] == '/':
1586 i += 1
1584 i += 1
1587 count -= 1
1585 count -= 1
1588 return path[:i].lstrip(), prefix + path[i:].rstrip()
1586 return path[:i].lstrip(), prefix + path[i:].rstrip()
1589
1587
1590 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1588 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1591 nulla = afile_orig == "/dev/null"
1589 nulla = afile_orig == "/dev/null"
1592 nullb = bfile_orig == "/dev/null"
1590 nullb = bfile_orig == "/dev/null"
1593 create = nulla and hunk.starta == 0 and hunk.lena == 0
1591 create = nulla and hunk.starta == 0 and hunk.lena == 0
1594 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1592 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1595 abase, afile = pathtransform(afile_orig, strip, prefix)
1593 abase, afile = pathtransform(afile_orig, strip, prefix)
1596 gooda = not nulla and backend.exists(afile)
1594 gooda = not nulla and backend.exists(afile)
1597 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1595 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1598 if afile == bfile:
1596 if afile == bfile:
1599 goodb = gooda
1597 goodb = gooda
1600 else:
1598 else:
1601 goodb = not nullb and backend.exists(bfile)
1599 goodb = not nullb and backend.exists(bfile)
1602 missing = not goodb and not gooda and not create
1600 missing = not goodb and not gooda and not create
1603
1601
1604 # some diff programs apparently produce patches where the afile is
1602 # some diff programs apparently produce patches where the afile is
1605 # not /dev/null, but afile starts with bfile
1603 # not /dev/null, but afile starts with bfile
1606 abasedir = afile[:afile.rfind('/') + 1]
1604 abasedir = afile[:afile.rfind('/') + 1]
1607 bbasedir = bfile[:bfile.rfind('/') + 1]
1605 bbasedir = bfile[:bfile.rfind('/') + 1]
1608 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1606 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1609 and hunk.starta == 0 and hunk.lena == 0):
1607 and hunk.starta == 0 and hunk.lena == 0):
1610 create = True
1608 create = True
1611 missing = False
1609 missing = False
1612
1610
1613 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1611 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1614 # diff is between a file and its backup. In this case, the original
1612 # diff is between a file and its backup. In this case, the original
1615 # file should be patched (see original mpatch code).
1613 # file should be patched (see original mpatch code).
1616 isbackup = (abase == bbase and bfile.startswith(afile))
1614 isbackup = (abase == bbase and bfile.startswith(afile))
1617 fname = None
1615 fname = None
1618 if not missing:
1616 if not missing:
1619 if gooda and goodb:
1617 if gooda and goodb:
1620 if isbackup:
1618 if isbackup:
1621 fname = afile
1619 fname = afile
1622 else:
1620 else:
1623 fname = bfile
1621 fname = bfile
1624 elif gooda:
1622 elif gooda:
1625 fname = afile
1623 fname = afile
1626
1624
1627 if not fname:
1625 if not fname:
1628 if not nullb:
1626 if not nullb:
1629 if isbackup:
1627 if isbackup:
1630 fname = afile
1628 fname = afile
1631 else:
1629 else:
1632 fname = bfile
1630 fname = bfile
1633 elif not nulla:
1631 elif not nulla:
1634 fname = afile
1632 fname = afile
1635 else:
1633 else:
1636 raise PatchError(_("undefined source and destination files"))
1634 raise PatchError(_("undefined source and destination files"))
1637
1635
1638 gp = patchmeta(fname)
1636 gp = patchmeta(fname)
1639 if create:
1637 if create:
1640 gp.op = 'ADD'
1638 gp.op = 'ADD'
1641 elif remove:
1639 elif remove:
1642 gp.op = 'DELETE'
1640 gp.op = 'DELETE'
1643 return gp
1641 return gp
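# Editor's note: an illustrative restatement (not used by the module) of the
# creation/removal tests at the top of makepatchmeta(); 'hunk' stands for any
# object carrying the same four range attributes.
def _patchkind(afile_orig, bfile_orig, hunk):
    if afile_orig == '/dev/null' and hunk.starta == 0 and hunk.lena == 0:
        return 'ADD'      # brand new file
    if bfile_orig == '/dev/null' and hunk.startb == 0 and hunk.lenb == 0:
        return 'DELETE'   # file removal
    return None           # plain modification; gp.op keeps its default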
1644
1642
1645 def scanpatch(fp):
1643 def scanpatch(fp):
1646 """like patch.iterhunks, but yield different events
1644 """like patch.iterhunks, but yield different events
1647
1645
1648 - ('file', [header_lines + fromfile + tofile])
1646 - ('file', [header_lines + fromfile + tofile])
1649 - ('context', [context_lines])
1647 - ('context', [context_lines])
1650 - ('hunk', [hunk_lines])
1648 - ('hunk', [hunk_lines])
1651 - ('range', (-start,len, +start,len, proc))
1649 - ('range', (-start,len, +start,len, proc))
1652 """
1650 """
1653 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1651 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1654 lr = linereader(fp)
1652 lr = linereader(fp)
1655
1653
1656 def scanwhile(first, p):
1654 def scanwhile(first, p):
1657 """scan lr while predicate holds"""
1655 """scan lr while predicate holds"""
1658 lines = [first]
1656 lines = [first]
1659 while True:
1657 while True:
1660 line = lr.readline()
1658 line = lr.readline()
1661 if not line:
1659 if not line:
1662 break
1660 break
1663 if p(line):
1661 if p(line):
1664 lines.append(line)
1662 lines.append(line)
1665 else:
1663 else:
1666 lr.push(line)
1664 lr.push(line)
1667 break
1665 break
1668 return lines
1666 return lines
1669
1667
1670 while True:
1668 while True:
1671 line = lr.readline()
1669 line = lr.readline()
1672 if not line:
1670 if not line:
1673 break
1671 break
1674 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1672 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1675 def notheader(line):
1673 def notheader(line):
1676 s = line.split(None, 1)
1674 s = line.split(None, 1)
1677 return not s or s[0] not in ('---', 'diff')
1675 return not s or s[0] not in ('---', 'diff')
1678 header = scanwhile(line, notheader)
1676 header = scanwhile(line, notheader)
1679 fromfile = lr.readline()
1677 fromfile = lr.readline()
1680 if fromfile.startswith('---'):
1678 if fromfile.startswith('---'):
1681 tofile = lr.readline()
1679 tofile = lr.readline()
1682 header += [fromfile, tofile]
1680 header += [fromfile, tofile]
1683 else:
1681 else:
1684 lr.push(fromfile)
1682 lr.push(fromfile)
1685 yield 'file', header
1683 yield 'file', header
1686 elif line[0] == ' ':
1684 elif line[0] == ' ':
1687 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1685 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1688 elif line[0] in '-+':
1686 elif line[0] in '-+':
1689 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1687 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1690 else:
1688 else:
1691 m = lines_re.match(line)
1689 m = lines_re.match(line)
1692 if m:
1690 if m:
1693 yield 'range', m.groups()
1691 yield 'range', m.groups()
1694 else:
1692 else:
1695 yield 'other', line
1693 yield 'other', line
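# Editor's note: a small driver (hypothetical, for illustration only) that
# prints the event stream scanpatch() yields for a file-like object 'fp';
# 'range' events carry the regex groups, 'other' a single raw line, and the
# remaining kinds carry lists of lines.
def _dumpscan(fp):
    for what, data in scanpatch(fp):
        if what in ('file', 'context', 'hunk'):
            print '%s: %d line(s)' % (what, len(data))
        else:
            print '%s: %r' % (what, data)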
1696
1694
1697 def scangitpatch(lr, firstline):
1695 def scangitpatch(lr, firstline):
1698 """
1696 """
1699 Git patches can emit:
1697 Git patches can emit:
1700 - rename a to b
1698 - rename a to b
1701 - change b
1699 - change b
1702 - copy a to c
1700 - copy a to c
1703 - change c
1701 - change c
1704
1702
1705 We cannot apply this sequence as-is: the renamed 'a' could not be
1703 We cannot apply this sequence as-is: the renamed 'a' could not be
1706 found, as it would have been renamed already. And we cannot copy
1704 found, as it would have been renamed already. And we cannot copy
1707 from 'b' instead because 'b' would have been changed already. So
1705 from 'b' instead because 'b' would have been changed already. So
1708 we scan the git patch for copy and rename commands so we can
1706 we scan the git patch for copy and rename commands so we can
1709 perform the copies ahead of time.
1707 perform the copies ahead of time.
1710 """
1708 """
1711 pos = 0
1709 pos = 0
1712 try:
1710 try:
1713 pos = lr.fp.tell()
1711 pos = lr.fp.tell()
1714 fp = lr.fp
1712 fp = lr.fp
1715 except IOError:
1713 except IOError:
1716 fp = cStringIO.StringIO(lr.fp.read())
1714 fp = cStringIO.StringIO(lr.fp.read())
1717 gitlr = linereader(fp)
1715 gitlr = linereader(fp)
1718 gitlr.push(firstline)
1716 gitlr.push(firstline)
1719 gitpatches = readgitpatch(gitlr)
1717 gitpatches = readgitpatch(gitlr)
1720 fp.seek(pos)
1718 fp.seek(pos)
1721 return gitpatches
1719 return gitpatches
1722
1720
1723 def iterhunks(fp):
1721 def iterhunks(fp):
1724 """Read a patch and yield the following events:
1722 """Read a patch and yield the following events:
1725 - ("file", afile, bfile, firsthunk): select a new target file.
1723 - ("file", afile, bfile, firsthunk): select a new target file.
1726 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1724 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1727 "file" event.
1725 "file" event.
1728 - ("git", gitchanges): current diff is in git format, gitchanges
1726 - ("git", gitchanges): current diff is in git format, gitchanges
1729 maps filenames to gitpatch records. Unique event.
1727 maps filenames to gitpatch records. Unique event.
1730 """
1728 """
1731 afile = ""
1729 afile = ""
1732 bfile = ""
1730 bfile = ""
1733 state = None
1731 state = None
1734 hunknum = 0
1732 hunknum = 0
1735 emitfile = newfile = False
1733 emitfile = newfile = False
1736 gitpatches = None
1734 gitpatches = None
1737
1735
1738 # our states
1736 # our states
1739 BFILE = 1
1737 BFILE = 1
1740 context = None
1738 context = None
1741 lr = linereader(fp)
1739 lr = linereader(fp)
1742
1740
1743 while True:
1741 while True:
1744 x = lr.readline()
1742 x = lr.readline()
1745 if not x:
1743 if not x:
1746 break
1744 break
1747 if state == BFILE and (
1745 if state == BFILE and (
1748 (not context and x[0] == '@')
1746 (not context and x[0] == '@')
1749 or (context is not False and x.startswith('***************'))
1747 or (context is not False and x.startswith('***************'))
1750 or x.startswith('GIT binary patch')):
1748 or x.startswith('GIT binary patch')):
1751 gp = None
1749 gp = None
1752 if (gitpatches and
1750 if (gitpatches and
1753 gitpatches[-1].ispatching(afile, bfile)):
1751 gitpatches[-1].ispatching(afile, bfile)):
1754 gp = gitpatches.pop()
1752 gp = gitpatches.pop()
1755 if x.startswith('GIT binary patch'):
1753 if x.startswith('GIT binary patch'):
1756 h = binhunk(lr, gp.path)
1754 h = binhunk(lr, gp.path)
1757 else:
1755 else:
1758 if context is None and x.startswith('***************'):
1756 if context is None and x.startswith('***************'):
1759 context = True
1757 context = True
1760 h = hunk(x, hunknum + 1, lr, context)
1758 h = hunk(x, hunknum + 1, lr, context)
1761 hunknum += 1
1759 hunknum += 1
1762 if emitfile:
1760 if emitfile:
1763 emitfile = False
1761 emitfile = False
1764 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1762 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1765 yield 'hunk', h
1763 yield 'hunk', h
1766 elif x.startswith('diff --git a/'):
1764 elif x.startswith('diff --git a/'):
1767 m = gitre.match(x.rstrip(' \r\n'))
1765 m = gitre.match(x.rstrip(' \r\n'))
1768 if not m:
1766 if not m:
1769 continue
1767 continue
1770 if gitpatches is None:
1768 if gitpatches is None:
1771 # scan whole input for git metadata
1769 # scan whole input for git metadata
1772 gitpatches = scangitpatch(lr, x)
1770 gitpatches = scangitpatch(lr, x)
1773 yield 'git', [g.copy() for g in gitpatches
1771 yield 'git', [g.copy() for g in gitpatches
1774 if g.op in ('COPY', 'RENAME')]
1772 if g.op in ('COPY', 'RENAME')]
1775 gitpatches.reverse()
1773 gitpatches.reverse()
1776 afile = 'a/' + m.group(1)
1774 afile = 'a/' + m.group(1)
1777 bfile = 'b/' + m.group(2)
1775 bfile = 'b/' + m.group(2)
1778 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1776 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1779 gp = gitpatches.pop()
1777 gp = gitpatches.pop()
1780 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1778 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1781 if not gitpatches:
1779 if not gitpatches:
1782 raise PatchError(_('failed to synchronize metadata for "%s"')
1780 raise PatchError(_('failed to synchronize metadata for "%s"')
1783 % afile[2:])
1781 % afile[2:])
1784 gp = gitpatches[-1]
1782 gp = gitpatches[-1]
1785 newfile = True
1783 newfile = True
1786 elif x.startswith('---'):
1784 elif x.startswith('---'):
1787 # check for a unified diff
1785 # check for a unified diff
1788 l2 = lr.readline()
1786 l2 = lr.readline()
1789 if not l2.startswith('+++'):
1787 if not l2.startswith('+++'):
1790 lr.push(l2)
1788 lr.push(l2)
1791 continue
1789 continue
1792 newfile = True
1790 newfile = True
1793 context = False
1791 context = False
1794 afile = parsefilename(x)
1792 afile = parsefilename(x)
1795 bfile = parsefilename(l2)
1793 bfile = parsefilename(l2)
1796 elif x.startswith('***'):
1794 elif x.startswith('***'):
1797 # check for a context diff
1795 # check for a context diff
1798 l2 = lr.readline()
1796 l2 = lr.readline()
1799 if not l2.startswith('---'):
1797 if not l2.startswith('---'):
1800 lr.push(l2)
1798 lr.push(l2)
1801 continue
1799 continue
1802 l3 = lr.readline()
1800 l3 = lr.readline()
1803 lr.push(l3)
1801 lr.push(l3)
1804 if not l3.startswith("***************"):
1802 if not l3.startswith("***************"):
1805 lr.push(l2)
1803 lr.push(l2)
1806 continue
1804 continue
1807 newfile = True
1805 newfile = True
1808 context = True
1806 context = True
1809 afile = parsefilename(x)
1807 afile = parsefilename(x)
1810 bfile = parsefilename(l2)
1808 bfile = parsefilename(l2)
1811
1809
1812 if newfile:
1810 if newfile:
1813 newfile = False
1811 newfile = False
1814 emitfile = True
1812 emitfile = True
1815 state = BFILE
1813 state = BFILE
1816 hunknum = 0
1814 hunknum = 0
1817
1815
1818 while gitpatches:
1816 while gitpatches:
1819 gp = gitpatches.pop()
1817 gp = gitpatches.pop()
1820 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1818 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
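# Editor's note: a minimal consumer sketch (not part of the module) of the
# event protocol documented in iterhunks(); it merely collects the target
# path of every 'file' event, preferring git metadata when present.
def _listtargets(fp):
    targets = []
    for state, values in iterhunks(fp):
        if state == 'file':
            afile, bfile, firsthunk, gp = values
            targets.append(gp.path if gp else bfile)
    return targets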
1821
1819
1822 def applybindelta(binchunk, data):
1820 def applybindelta(binchunk, data):
1823 """Apply a binary delta hunk
1821 """Apply a binary delta hunk
1824 The algorithm used is the algorithm from git's patch-delta.c
1822 The algorithm used is the algorithm from git's patch-delta.c
1825 """
1823 """
1826 def deltahead(binchunk):
1824 def deltahead(binchunk):
1827 i = 0
1825 i = 0
1828 for c in binchunk:
1826 for c in binchunk:
1829 i += 1
1827 i += 1
1830 if not (ord(c) & 0x80):
1828 if not (ord(c) & 0x80):
1831 return i
1829 return i
1832 return i
1830 return i
1833 out = ""
1831 out = ""
1834 s = deltahead(binchunk)
1832 s = deltahead(binchunk)
1835 binchunk = binchunk[s:]
1833 binchunk = binchunk[s:]
1836 s = deltahead(binchunk)
1834 s = deltahead(binchunk)
1837 binchunk = binchunk[s:]
1835 binchunk = binchunk[s:]
1838 i = 0
1836 i = 0
1839 while i < len(binchunk):
1837 while i < len(binchunk):
1840 cmd = ord(binchunk[i])
1838 cmd = ord(binchunk[i])
1841 i += 1
1839 i += 1
1842 if (cmd & 0x80):
1840 if (cmd & 0x80):
1843 offset = 0
1841 offset = 0
1844 size = 0
1842 size = 0
1845 if (cmd & 0x01):
1843 if (cmd & 0x01):
1846 offset = ord(binchunk[i])
1844 offset = ord(binchunk[i])
1847 i += 1
1845 i += 1
1848 if (cmd & 0x02):
1846 if (cmd & 0x02):
1849 offset |= ord(binchunk[i]) << 8
1847 offset |= ord(binchunk[i]) << 8
1850 i += 1
1848 i += 1
1851 if (cmd & 0x04):
1849 if (cmd & 0x04):
1852 offset |= ord(binchunk[i]) << 16
1850 offset |= ord(binchunk[i]) << 16
1853 i += 1
1851 i += 1
1854 if (cmd & 0x08):
1852 if (cmd & 0x08):
1855 offset |= ord(binchunk[i]) << 24
1853 offset |= ord(binchunk[i]) << 24
1856 i += 1
1854 i += 1
1857 if (cmd & 0x10):
1855 if (cmd & 0x10):
1858 size = ord(binchunk[i])
1856 size = ord(binchunk[i])
1859 i += 1
1857 i += 1
1860 if (cmd & 0x20):
1858 if (cmd & 0x20):
1861 size |= ord(binchunk[i]) << 8
1859 size |= ord(binchunk[i]) << 8
1862 i += 1
1860 i += 1
1863 if (cmd & 0x40):
1861 if (cmd & 0x40):
1864 size |= ord(binchunk[i]) << 16
1862 size |= ord(binchunk[i]) << 16
1865 i += 1
1863 i += 1
1866 if size == 0:
1864 if size == 0:
1867 size = 0x10000
1865 size = 0x10000
1868 offset_end = offset + size
1866 offset_end = offset + size
1869 out += data[offset:offset_end]
1867 out += data[offset:offset_end]
1870 elif cmd != 0:
1868 elif cmd != 0:
1871 offset_end = i + cmd
1869 offset_end = i + cmd
1872 out += binchunk[i:offset_end]
1870 out += binchunk[i:offset_end]
1873 i += cmd
1871 i += cmd
1874 else:
1872 else:
1875 raise PatchError(_('unexpected delta opcode 0'))
1873 raise PatchError(_('unexpected delta opcode 0'))
1876 return out
1874 return out
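# Editor's note: a hand-built example (illustrative only) of the delta format
# applybindelta() consumes.  The header is two base-128 varints -- source and
# result sizes, skipped by deltahead() -- followed by opcodes; an opcode byte
# in the range 1-127 simply inserts that many literal bytes.
def _literaldelta(source, newdata):
    # keep both sizes under 128 so each varint fits in a single byte
    assert len(source) < 128 and 0 < len(newdata) < 128
    return chr(len(source)) + chr(len(newdata)) + chr(len(newdata)) + newdata
# e.g. applybindelta(_literaldelta(old, 'new text'), old) == 'new text'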
1877
1875
1878 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1876 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1879 """Reads a patch from fp and tries to apply it.
1877 """Reads a patch from fp and tries to apply it.
1880
1878
1881 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1879 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1882 there was any fuzz.
1880 there was any fuzz.
1883
1881
1884 If 'eolmode' is 'strict', the patch content and patched file are
1882 If 'eolmode' is 'strict', the patch content and patched file are
1885 read in binary mode. Otherwise, line endings are ignored when
1883 read in binary mode. Otherwise, line endings are ignored when
1886 patching, then normalized according to 'eolmode'.
1884 patching, then normalized according to 'eolmode'.
1887 """
1885 """
1888 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1886 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1889 prefix=prefix, eolmode=eolmode)
1887 prefix=prefix, eolmode=eolmode)
1890
1888
1891 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1889 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1892 eolmode='strict'):
1890 eolmode='strict'):
1893
1891
1894 if prefix:
1892 if prefix:
1895 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1893 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1896 prefix)
1894 prefix)
1897 if prefix != '':
1895 if prefix != '':
1898 prefix += '/'
1896 prefix += '/'
1899 def pstrip(p):
1897 def pstrip(p):
1900 return pathtransform(p, strip - 1, prefix)[1]
1898 return pathtransform(p, strip - 1, prefix)[1]
1901
1899
1902 rejects = 0
1900 rejects = 0
1903 err = 0
1901 err = 0
1904 current_file = None
1902 current_file = None
1905
1903
1906 for state, values in iterhunks(fp):
1904 for state, values in iterhunks(fp):
1907 if state == 'hunk':
1905 if state == 'hunk':
1908 if not current_file:
1906 if not current_file:
1909 continue
1907 continue
1910 ret = current_file.apply(values)
1908 ret = current_file.apply(values)
1911 if ret > 0:
1909 if ret > 0:
1912 err = 1
1910 err = 1
1913 elif state == 'file':
1911 elif state == 'file':
1914 if current_file:
1912 if current_file:
1915 rejects += current_file.close()
1913 rejects += current_file.close()
1916 current_file = None
1914 current_file = None
1917 afile, bfile, first_hunk, gp = values
1915 afile, bfile, first_hunk, gp = values
1918 if gp:
1916 if gp:
1919 gp.path = pstrip(gp.path)
1917 gp.path = pstrip(gp.path)
1920 if gp.oldpath:
1918 if gp.oldpath:
1921 gp.oldpath = pstrip(gp.oldpath)
1919 gp.oldpath = pstrip(gp.oldpath)
1922 else:
1920 else:
1923 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1921 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1924 prefix)
1922 prefix)
1925 if gp.op == 'RENAME':
1923 if gp.op == 'RENAME':
1926 backend.unlink(gp.oldpath)
1924 backend.unlink(gp.oldpath)
1927 if not first_hunk:
1925 if not first_hunk:
1928 if gp.op == 'DELETE':
1926 if gp.op == 'DELETE':
1929 backend.unlink(gp.path)
1927 backend.unlink(gp.path)
1930 continue
1928 continue
1931 data, mode = None, None
1929 data, mode = None, None
1932 if gp.op in ('RENAME', 'COPY'):
1930 if gp.op in ('RENAME', 'COPY'):
1933 data, mode = store.getfile(gp.oldpath)[:2]
1931 data, mode = store.getfile(gp.oldpath)[:2]
1934 # FIXME: failing getfile has never been handled here
1932 # FIXME: failing getfile has never been handled here
1935 assert data is not None
1933 assert data is not None
1936 if gp.mode:
1934 if gp.mode:
1937 mode = gp.mode
1935 mode = gp.mode
1938 if gp.op == 'ADD':
1936 if gp.op == 'ADD':
1939 # Added files without content have no hunk and
1937 # Added files without content have no hunk and
1940 # must be created
1938 # must be created
1941 data = ''
1939 data = ''
1942 if data or mode:
1940 if data or mode:
1943 if (gp.op in ('ADD', 'RENAME', 'COPY')
1941 if (gp.op in ('ADD', 'RENAME', 'COPY')
1944 and backend.exists(gp.path)):
1942 and backend.exists(gp.path)):
1945 raise PatchError(_("cannot create %s: destination "
1943 raise PatchError(_("cannot create %s: destination "
1946 "already exists") % gp.path)
1944 "already exists") % gp.path)
1947 backend.setfile(gp.path, data, mode, gp.oldpath)
1945 backend.setfile(gp.path, data, mode, gp.oldpath)
1948 continue
1946 continue
1949 try:
1947 try:
1950 current_file = patcher(ui, gp, backend, store,
1948 current_file = patcher(ui, gp, backend, store,
1951 eolmode=eolmode)
1949 eolmode=eolmode)
1952 except PatchError as inst:
1950 except PatchError as inst:
1953 ui.warn(str(inst) + '\n')
1951 ui.warn(str(inst) + '\n')
1954 current_file = None
1952 current_file = None
1955 rejects += 1
1953 rejects += 1
1956 continue
1954 continue
1957 elif state == 'git':
1955 elif state == 'git':
1958 for gp in values:
1956 for gp in values:
1959 path = pstrip(gp.oldpath)
1957 path = pstrip(gp.oldpath)
1960 data, mode = backend.getfile(path)
1958 data, mode = backend.getfile(path)
1961 if data is None:
1959 if data is None:
1962 # The error ignored here will trigger a getfile()
1960 # The error ignored here will trigger a getfile()
1963 # error in a place more appropriate for error
1961 # error in a place more appropriate for error
1964 # handling, and will not interrupt the patching
1962 # handling, and will not interrupt the patching
1965 # process.
1963 # process.
1966 pass
1964 pass
1967 else:
1965 else:
1968 store.setfile(path, data, mode)
1966 store.setfile(path, data, mode)
1969 else:
1967 else:
1970 raise util.Abort(_('unsupported parser state: %s') % state)
1968 raise util.Abort(_('unsupported parser state: %s') % state)
1971
1969
1972 if current_file:
1970 if current_file:
1973 rejects += current_file.close()
1971 rejects += current_file.close()
1974
1972
1975 if rejects:
1973 if rejects:
1976 return -1
1974 return -1
1977 return err
1975 return err
1978
1976
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz

def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0

def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    backend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)

def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    backend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)

def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = set()
    if patcher:
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)

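# Illustrative usage sketch, not part of the original module: how a caller
# might drive the public patch() entry point above.  The ui and repo objects
# are assumed to come from the usual Mercurial command context; the patch
# file name is made up.
def _demo_apply_patch(ui, repo, patchname):
    files = set()
    # patch() dispatches to _externalpatch() when ui.patch is configured,
    # otherwise to internalpatch(); 'files' collects the touched paths.
    fuzz = patch(ui, repo, patchname, strip=1, files=files)
    if fuzz:
        ui.warn(_('patch %s applied with fuzz\n') % patchname)
    return files
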
def changedfiles(ui, repo, patchpath, strip=1):
    backend = fsbackend(ui, repo.root)
    fp = open(patchpath, 'rb')
    try:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pathtransform(gp.path, strip - 1, '')[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       '')
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise util.Abort(_('unsupported parser state: %s') % state)
        return changed
    finally:
        fp.close()

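# Illustrative usage sketch, not part of the original module: listing which
# paths a patch would touch before applying it, via changedfiles() above.
# 'patchpath' is an assumed path to a patch file on disk.
def _demo_list_changed(ui, repo, patchpath):
    for f in sorted(changedfiles(ui, repo, patchpath, strip=1)):
        ui.write('%s\n' % f)
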
class GitDiffRequired(Exception):
    pass

def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
                           git=True, whitespace=True, formatchanging=True)

diffopts = diffallopts

def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        if opts:
            v = opts.get(key)
            if v:
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')
    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        buildopts['nobinary'] = get('nobinary')
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**buildopts)

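# Illustrative usage sketch, not part of the original module: building diff
# options for a consumer that understands git-style diffs and the whitespace
# options, but not the format-changing ones (noprefix, nobinary, ...).
def _demo_diffopts(ui, opts=None):
    return difffeatureopts(ui, opts=opts, git=True, whitespace=True)
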
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix='', relroot=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.'''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    copy = {}
    if opts.git or opts.upgrade:
        copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)

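# Illustrative usage sketch, not part of the original module: rendering the
# diff between the working directory and its first parent.  diff() yields
# its output in blocks (headers and hunks), so the caller just writes each
# chunk as it comes.
def _demo_print_wdir_diff(ui, repo, match=None):
    opts = diffallopts(ui)
    for chunk in diff(repo, match=match, opts=opts):
        ui.write(chunk)
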
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        for token in tabsplitter.findall(stripline):
                            if '\t' == token[0]:
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                yield (line, '')
            if line != stripline:
                yield (line[len(stripline):], 'diff.trailingwhitespace')

def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    return difflabel(diff, *args, **kw)

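# Illustrative usage sketch, not part of the original module: same as the
# diff() example above, but with label (color) support by going through
# diffui(), which pairs every piece of output with its ui label.
def _demo_print_labeled_diff(ui, repo):
    for output, label in diffui(repo, opts=diffallopts(ui)):
        ui.write(output, label=label)
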
def _filepairs(ctx1, modified, added, removed, copy, opts):
    '''generates tuples (f1, f2, copyop), where f1 is the name of the file
    before and f2 is the name after. For added files, f1 will be None,
    and for removed files, f2 will be None. copyop may be set to None, 'copy'
    or 'rename' (the latter two only if opts.git is set).'''
    gone = set()

    copyto = dict([(v, k) for k, v in copy.items()])

    addedset, removedset = set(added), set(removed)
    # Fix up added, since merged-in additions appear as
    # modifications during merges
    for f in modified:
        if f not in ctx1:
            addedset.add(f)

    for f in sorted(modified + added + removed):
        copyop = None
        f1, f2 = f, f
        if f in addedset:
            f1 = None
            if f in copy:
                if opts.git:
                    f1 = copy[f]
                    if f1 in removedset and f1 not in gone:
                        copyop = 'rename'
                        gone.add(f1)
                    else:
                        copyop = 'copy'
        elif f in removedset:
            f2 = None
            if opts.git:
                # have we already reported a copy above?
                if (f in copyto and copyto[f] in addedset
                    and copy[copyto[f]] == f):
                    continue
        yield f1, f2, copyop

def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        if not text:
            text = ""
        l = len(text)
        s = util.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    if relroot != '' and (repo.ui.configbool('devel', 'all')
                          or repo.ui.configbool('devel', 'check-relroot')):
        for f in modified + added + removed + copy.keys() + copy.values():
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(
            ctx1, modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        flag1 = None
        flag2 = None
        if f1:
            content1 = getfilectx(f1, ctx1).data()
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            content2 = getfilectx(f2, ctx2).data()
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        binary = False
        if opts.git or losedatafn:
            binary = util.binary(content1) or util.binary(content2)

        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and not content2) or
                # empty file deletion
                (not content1 and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else: # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
        else:
            text = mdiff.unidiff(content1, date1,
                                 content2, date2,
                                 path1, path2, opts=opts)
        if header and (text or len(header) > 1):
            yield '\n'.join(header) + '\n'
        if text:
            yield text

def diffstatsum(stats):
    maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
    for f, a, r, b in stats:
        maxfile = max(maxfile, encoding.colwidth(f))
        maxtotal = max(maxtotal, a + r)
        addtotal += a
        removetotal += r
        binary = binary or b

    return maxfile, maxtotal, addtotal, removetotal, binary

def diffstatdata(lines):
    diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        if filename:
            results.append((filename, adds, removes, isbinary))

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # set numbers to 0 anyway when starting new file
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++ '):
            adds += 1
        elif line.startswith('-') and not line.startswith('--- '):
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results

def diffstat(lines, width=80, git=False):
    output = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        if isbinary:
            count = 'Bin'
        else:
            count = adds + removes
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - encoding.colwidth(filename)),
                       countwidth, count, pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)

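# Illustrative usage sketch, not part of the original module: producing a
# diffstat summary from the same chunks that diff() yields.  diffstat()
# consumes individual lines, hence the join/splitlines step.
def _demo_print_diffstat(ui, repo, width=80):
    chunks = diff(repo, opts=diffallopts(ui))
    lines = ''.join(chunks).splitlines(True)
    ui.write(diffstat(lines, width=width))
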
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            m = re.search(r'\++', graph)
            if m:
                yield (m.group(0), 'diffstat.inserted')
            m = re.search(r'-+', graph)
            if m:
                yield (m.group(0), 'diffstat.deleted')
        else:
            yield (line, '')
        yield ('\n', '')