patch: remove internal patcher fallback and NoHunk error...
Patrick Mezard
r12671:1b4e3152 default
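For context while reading the diff below: the docstring of iterhunks() in this file documents three event types, ('file', ...), ('hunk', ...) and ('git', ...). The following is only an illustrative consumer sketch, not part of this changeset; the dispatch() helper and the example.diff filename are made up for the example.

from mercurial import ui as uimod, patch

def dispatch(fp):
    # drive the parser and react to the documented events
    for event, data in patch.iterhunks(uimod.ui(), fp):
        if event == 'file':
            afile, bfile, firsthunk = data
            print 'file', afile, bfile
        elif event == 'hunk':
            print 'hunk', data.desc.rstrip()
        elif event == 'git':
            print '%d git patches' % len(data)

dispatch(open('example.diff'))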
@@ -1,1663 +1,1633 @@
 # patch.py - patch file parsing routines
 #
 # Copyright 2006 Brendan Cully <brendan@kublai.com>
 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 import cStringIO, email.Parser, os, re
 import tempfile, zlib
 
 from i18n import _
 from node import hex, nullid, short
 import base85, mdiff, util, diffhelpers, copies, encoding
 
 gitre = re.compile('diff --git a/(.*) b/(.*)')
 
 class PatchError(Exception):
     pass
 
-class NoHunks(PatchError):
-    pass
-
 # helper functions
 
 def copyfile(src, dst, basedir):
     abssrc, absdst = [util.canonpath(basedir, basedir, x) for x in [src, dst]]
     if os.path.lexists(absdst):
         raise util.Abort(_("cannot create %s: destination already exists") %
                          dst)
 
     dstdir = os.path.dirname(absdst)
     if dstdir and not os.path.isdir(dstdir):
         try:
             os.makedirs(dstdir)
         except IOError:
             raise util.Abort(
                 _("cannot create %s: unable to create destination directory")
                 % dst)
 
     util.copyfile(abssrc, absdst)
 
 # public functions
 
 def split(stream):
     '''return an iterator of individual patches from a stream'''
     def isheader(line, inheader):
         if inheader and line[0] in (' ', '\t'):
             # continuation
             return True
         if line[0] in (' ', '-', '+'):
             # diff line - don't check for header pattern in there
             return False
         l = line.split(': ', 1)
         return len(l) == 2 and ' ' not in l[0]
 
     def chunk(lines):
         return cStringIO.StringIO(''.join(lines))
 
     def hgsplit(stream, cur):
         inheader = True
 
         for line in stream:
             if not line.strip():
                 inheader = False
             if not inheader and line.startswith('# HG changeset patch'):
                 yield chunk(cur)
                 cur = []
                 inheader = True
 
             cur.append(line)
 
         if cur:
             yield chunk(cur)
 
     def mboxsplit(stream, cur):
         for line in stream:
             if line.startswith('From '):
                 for c in split(chunk(cur[1:])):
                     yield c
                 cur = []
 
             cur.append(line)
 
         if cur:
             for c in split(chunk(cur[1:])):
                 yield c
 
     def mimesplit(stream, cur):
         def msgfp(m):
             fp = cStringIO.StringIO()
             g = email.Generator.Generator(fp, mangle_from_=False)
             g.flatten(m)
             fp.seek(0)
             return fp
 
         for line in stream:
             cur.append(line)
         c = chunk(cur)
 
         m = email.Parser.Parser().parse(c)
         if not m.is_multipart():
             yield msgfp(m)
         else:
             ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
             for part in m.walk():
                 ct = part.get_content_type()
                 if ct not in ok_types:
                     continue
                 yield msgfp(part)
 
     def headersplit(stream, cur):
         inheader = False
 
         for line in stream:
             if not inheader and isheader(line, inheader):
                 yield chunk(cur)
                 cur = []
                 inheader = True
             if inheader and not isheader(line, inheader):
                 inheader = False
 
             cur.append(line)
 
         if cur:
             yield chunk(cur)
 
     def remainder(cur):
         yield chunk(cur)
 
     class fiter(object):
         def __init__(self, fp):
             self.fp = fp
 
         def __iter__(self):
             return self
 
         def next(self):
             l = self.fp.readline()
             if not l:
                 raise StopIteration
             return l
 
     inheader = False
     cur = []
 
     mimeheaders = ['content-type']
 
     if not hasattr(stream, 'next'):
         # http responses, for example, have readline but not next
         stream = fiter(stream)
 
     for line in stream:
         cur.append(line)
         if line.startswith('# HG changeset patch'):
             return hgsplit(stream, cur)
         elif line.startswith('From '):
             return mboxsplit(stream, cur)
         elif isheader(line, inheader):
             inheader = True
             if line.split(':', 1)[0].lower() in mimeheaders:
                 # let email parser handle this
                 return mimesplit(stream, cur)
         elif line.startswith('--- ') and inheader:
             # No evil headers seen by diff start, split by hand
             return headersplit(stream, cur)
         # Not enough info, keep reading
 
     # if we are here, we have a very plain patch
     return remainder(cur)
 
 def extract(ui, fileobj):
     '''extract patch from data read from fileobj.
 
     patch can be a normal patch or contained in an email message.
 
     return tuple (filename, message, user, date, branch, node, p1, p2).
     Any item in the returned tuple can be None. If filename is None,
     fileobj did not contain a patch. Caller must unlink filename when done.'''
 
     # attempt to detect the start of a patch
     # (this heuristic is borrowed from quilt)
     diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                         r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                         r'---[ \t].*?^\+\+\+[ \t]|'
                         r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
 
     fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
     tmpfp = os.fdopen(fd, 'w')
     try:
         msg = email.Parser.Parser().parse(fileobj)
 
         subject = msg['Subject']
         user = msg['From']
         if not subject and not user:
             # Not an email, restore parsed headers if any
             subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
 
         gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
         # should try to parse msg['Date']
         date = None
         nodeid = None
         branch = None
         parents = []
 
         if subject:
             if subject.startswith('[PATCH'):
                 pend = subject.find(']')
                 if pend >= 0:
                     subject = subject[pend + 1:].lstrip()
             subject = subject.replace('\n\t', ' ')
             ui.debug('Subject: %s\n' % subject)
         if user:
             ui.debug('From: %s\n' % user)
         diffs_seen = 0
         ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
         message = ''
         for part in msg.walk():
             content_type = part.get_content_type()
             ui.debug('Content-Type: %s\n' % content_type)
             if content_type not in ok_types:
                 continue
             payload = part.get_payload(decode=True)
             m = diffre.search(payload)
             if m:
                 hgpatch = False
                 hgpatchheader = False
                 ignoretext = False
 
                 ui.debug('found patch at byte %d\n' % m.start(0))
                 diffs_seen += 1
                 cfp = cStringIO.StringIO()
                 for line in payload[:m.start(0)].splitlines():
                     if line.startswith('# HG changeset patch'):
                         ui.debug('patch generated by hg export\n')
                         hgpatchheader = True
                         # drop earlier commit message content
                         cfp.seek(0)
                         cfp.truncate()
                         subject = None
                     elif hgpatchheader:
                         if line.startswith('# User '):
                             user = line[7:]
                             ui.debug('From: %s\n' % user)
                         elif line.startswith("# Date "):
                             date = line[7:]
                         elif line.startswith("# Branch "):
                             branch = line[9:]
                         elif line.startswith("# Node ID "):
                             nodeid = line[10:]
                         elif line.startswith("# Parent "):
                             parents.append(line[10:])
                         elif not line.startswith("# "):
                             hgpatchheader = False
                             hgpatch = True
                     elif line == '---' and gitsendmail:
                         ignoretext = True
                     if not hgpatchheader and not ignoretext:
                         cfp.write(line)
                         cfp.write('\n')
                 message = cfp.getvalue()
                 if tmpfp:
                     tmpfp.write(payload)
                     if not payload.endswith('\n'):
                         tmpfp.write('\n')
             elif not diffs_seen and message and content_type == 'text/plain':
                 message += '\n' + payload
     except:
         tmpfp.close()
         os.unlink(tmpname)
         raise
 
     if subject and not message.startswith(subject):
         message = '%s\n%s' % (subject, message)
     tmpfp.close()
     if not diffs_seen:
         os.unlink(tmpname)
         return None, message, user, date, branch, None, None, None
     p1 = parents and parents.pop(0) or None
     p2 = parents and parents.pop(0) or None
     return tmpname, message, user, date, branch, nodeid, p1, p2
 
 class patchmeta(object):
     """Patched file metadata
 
     'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
     or COPY. 'path' is patched file path. 'oldpath' is set to the
     origin file when 'op' is either COPY or RENAME, None otherwise. If
     file mode is changed, 'mode' is a tuple (islink, isexec) where
     'islink' is True if the file is a symlink and 'isexec' is True if
     the file is executable. Otherwise, 'mode' is None.
     """
     def __init__(self, path):
         self.path = path
         self.oldpath = None
         self.mode = None
         self.op = 'MODIFY'
         self.binary = False
 
     def setmode(self, mode):
         islink = mode & 020000
         isexec = mode & 0100
         self.mode = (islink, isexec)
 
     def __repr__(self):
         return "<patchmeta %s %r>" % (self.op, self.path)
 
 def readgitpatch(lr):
     """extract git-style metadata about patches from <patchname>"""
 
     # Filter patch for git information
     gp = None
     gitpatches = []
     for line in lr:
         line = line.rstrip(' \r\n')
         if line.startswith('diff --git'):
             m = gitre.match(line)
             if m:
                 if gp:
                     gitpatches.append(gp)
                 dst = m.group(2)
                 gp = patchmeta(dst)
         elif gp:
             if line.startswith('--- '):
                 gitpatches.append(gp)
                 gp = None
                 continue
             if line.startswith('rename from '):
                 gp.op = 'RENAME'
                 gp.oldpath = line[12:]
             elif line.startswith('rename to '):
                 gp.path = line[10:]
             elif line.startswith('copy from '):
                 gp.op = 'COPY'
                 gp.oldpath = line[10:]
             elif line.startswith('copy to '):
                 gp.path = line[8:]
             elif line.startswith('deleted file'):
                 gp.op = 'DELETE'
             elif line.startswith('new file mode '):
                 gp.op = 'ADD'
                 gp.setmode(int(line[-6:], 8))
             elif line.startswith('new mode '):
                 gp.setmode(int(line[-6:], 8))
             elif line.startswith('GIT binary patch'):
                 gp.binary = True
     if gp:
         gitpatches.append(gp)
 
     return gitpatches
 
 class linereader(object):
     # simple class to allow pushing lines back into the input stream
     def __init__(self, fp, textmode=False):
         self.fp = fp
         self.buf = []
         self.textmode = textmode
         self.eol = None
 
     def push(self, line):
         if line is not None:
             self.buf.append(line)
 
     def readline(self):
         if self.buf:
             l = self.buf[0]
             del self.buf[0]
             return l
         l = self.fp.readline()
         if not self.eol:
             if l.endswith('\r\n'):
                 self.eol = '\r\n'
             elif l.endswith('\n'):
                 self.eol = '\n'
         if self.textmode and l.endswith('\r\n'):
             l = l[:-2] + '\n'
         return l
 
     def __iter__(self):
         while 1:
             l = self.readline()
             if not l:
                 break
             yield l
 
 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
 eolmodes = ['strict', 'crlf', 'lf', 'auto']
 
 class patchfile(object):
     def __init__(self, ui, fname, opener, missing=False, eolmode='strict'):
         self.fname = fname
         self.eolmode = eolmode
         self.eol = None
         self.opener = opener
         self.ui = ui
         self.lines = []
         self.exists = False
         self.missing = missing
         if not missing:
             try:
                 self.lines = self.readlines(fname)
                 self.exists = True
             except IOError:
                 pass
         else:
             self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
 
         self.hash = {}
         self.dirty = 0
         self.offset = 0
         self.skew = 0
         self.rej = []
         self.fileprinted = False
         self.printfile(False)
         self.hunks = 0
 
     def readlines(self, fname):
         if os.path.islink(fname):
             return [os.readlink(fname)]
         fp = self.opener(fname, 'r')
         try:
             lr = linereader(fp, self.eolmode != 'strict')
             lines = list(lr)
             self.eol = lr.eol
             return lines
         finally:
             fp.close()
 
     def writelines(self, fname, lines):
         # Ensure supplied data ends in fname, being a regular file or
         # a symlink. cmdutil.updatedir will -too magically- take care
         # of setting it to the proper type afterwards.
         islink = os.path.islink(fname)
         if islink:
             fp = cStringIO.StringIO()
         else:
             fp = self.opener(fname, 'w')
         try:
             if self.eolmode == 'auto':
                 eol = self.eol
             elif self.eolmode == 'crlf':
                 eol = '\r\n'
             else:
                 eol = '\n'
 
             if self.eolmode != 'strict' and eol and eol != '\n':
                 for l in lines:
                     if l and l[-1] == '\n':
                         l = l[:-1] + eol
                     fp.write(l)
             else:
                 fp.writelines(lines)
             if islink:
                 self.opener.symlink(fp.getvalue(), fname)
         finally:
             fp.close()
 
     def unlink(self, fname):
         os.unlink(fname)
 
     def printfile(self, warn):
         if self.fileprinted:
             return
         if warn or self.ui.verbose:
             self.fileprinted = True
         s = _("patching file %s\n") % self.fname
         if warn:
             self.ui.warn(s)
         else:
             self.ui.note(s)
 
 
     def findlines(self, l, linenum):
         # looks through the hash and finds candidate lines. The
         # result is a list of line numbers sorted based on distance
         # from linenum
 
         cand = self.hash.get(l, [])
         if len(cand) > 1:
             # resort our list of potentials forward then back.
             cand.sort(key=lambda x: abs(x - linenum))
         return cand
 
     def hashlines(self):
         self.hash = {}
         for x, s in enumerate(self.lines):
             self.hash.setdefault(s, []).append(x)
 
     def write_rej(self):
         # our rejects are a little different from patch(1). This always
         # creates rejects in the same form as the original patch. A file
         # header is inserted so that you can run the reject through patch again
         # without having to type the filename.
 
         if not self.rej:
             return
 
         fname = self.fname + ".rej"
         self.ui.warn(
             _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
             (len(self.rej), self.hunks, fname))
 
         def rejlines():
             base = os.path.basename(self.fname)
             yield "--- %s\n+++ %s\n" % (base, base)
             for x in self.rej:
                 for l in x.hunk:
                     yield l
                     if l[-1] != '\n':
                         yield "\n\ No newline at end of file\n"
 
         self.writelines(fname, rejlines())
 
     def apply(self, h):
         if not h.complete():
             raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                             h.lenb))
 
         self.hunks += 1
 
         if self.missing:
             self.rej.append(h)
             return -1
 
         if self.exists and h.createfile():
             self.ui.warn(_("file %s already exists\n") % self.fname)
             self.rej.append(h)
             return -1
 
         if isinstance(h, binhunk):
             if h.rmfile():
                 self.unlink(self.fname)
             else:
                 self.lines[:] = h.new()
                 self.offset += len(h.new())
                 self.dirty = 1
             return 0
 
         horig = h
         if (self.eolmode in ('crlf', 'lf')
             or self.eolmode == 'auto' and self.eol):
             # If new eols are going to be normalized, then normalize
             # hunk data before patching. Otherwise, preserve input
             # line-endings.
             h = h.getnormalized()
 
         # fast case first, no offsets, no fuzz
         old = h.old()
         # patch starts counting at 1 unless we are adding the file
         if h.starta == 0:
             start = 0
         else:
             start = h.starta + self.offset - 1
         orig_start = start
         # if there's skew we want to emit the "(offset %d lines)" even
         # when the hunk cleanly applies at start + skew, so skip the
         # fast case code
         if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
             if h.rmfile():
                 self.unlink(self.fname)
             else:
                 self.lines[start : start + h.lena] = h.new()
                 self.offset += h.lenb - h.lena
                 self.dirty = 1
             return 0
 
         # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
         self.hashlines()
         if h.hunk[-1][0] != ' ':
             # if the hunk tried to put something at the bottom of the file
             # override the start line and use eof here
             search_start = len(self.lines)
         else:
             search_start = orig_start + self.skew
 
         for fuzzlen in xrange(3):
             for toponly in [True, False]:
                 old = h.old(fuzzlen, toponly)
 
                 cand = self.findlines(old[0][1:], search_start)
                 for l in cand:
                     if diffhelpers.testhunk(old, self.lines, l) == 0:
                         newlines = h.new(fuzzlen, toponly)
                         self.lines[l : l + len(old)] = newlines
                         self.offset += len(newlines) - len(old)
                         self.skew = l - orig_start
                         self.dirty = 1
                         offset = l - orig_start - fuzzlen
                         if fuzzlen:
                             msg = _("Hunk #%d succeeded at %d "
                                     "with fuzz %d "
                                     "(offset %d lines).\n")
                             self.printfile(True)
                             self.ui.warn(msg %
                                 (h.number, l + 1, fuzzlen, offset))
                         else:
                             msg = _("Hunk #%d succeeded at %d "
                                     "(offset %d lines).\n")
                             self.ui.note(msg % (h.number, l + 1, offset))
                         return fuzzlen
         self.printfile(True)
         self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
         self.rej.append(horig)
         return -1
 
 class hunk(object):
     def __init__(self, desc, num, lr, context, create=False, remove=False):
         self.number = num
         self.desc = desc
         self.hunk = [desc]
         self.a = []
         self.b = []
         self.starta = self.lena = None
         self.startb = self.lenb = None
         if lr is not None:
             if context:
                 self.read_context_hunk(lr)
             else:
                 self.read_unified_hunk(lr)
         self.create = create
         self.remove = remove and not create
 
     def getnormalized(self):
         """Return a copy with line endings normalized to LF."""
 
         def normalize(lines):
             nlines = []
             for line in lines:
                 if line.endswith('\r\n'):
                     line = line[:-2] + '\n'
                 nlines.append(line)
             return nlines
 
         # Dummy object, it is rebuilt manually
         nh = hunk(self.desc, self.number, None, None, False, False)
         nh.number = self.number
         nh.desc = self.desc
         nh.hunk = self.hunk
         nh.a = normalize(self.a)
         nh.b = normalize(self.b)
         nh.starta = self.starta
         nh.startb = self.startb
         nh.lena = self.lena
         nh.lenb = self.lenb
         nh.create = self.create
         nh.remove = self.remove
         return nh
 
     def read_unified_hunk(self, lr):
         m = unidesc.match(self.desc)
         if not m:
             raise PatchError(_("bad hunk #%d") % self.number)
         self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
         if self.lena is None:
             self.lena = 1
         else:
             self.lena = int(self.lena)
         if self.lenb is None:
             self.lenb = 1
         else:
             self.lenb = int(self.lenb)
         self.starta = int(self.starta)
         self.startb = int(self.startb)
         diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
         # if we hit eof before finishing out the hunk, the last line will
         # be zero length. Lets try to fix it up.
         while len(self.hunk[-1]) == 0:
             del self.hunk[-1]
             del self.a[-1]
             del self.b[-1]
             self.lena -= 1
             self.lenb -= 1
 
     def read_context_hunk(self, lr):
         self.desc = lr.readline()
         m = contextdesc.match(self.desc)
         if not m:
             raise PatchError(_("bad hunk #%d") % self.number)
         foo, self.starta, foo2, aend, foo3 = m.groups()
         self.starta = int(self.starta)
         if aend is None:
             aend = self.starta
         self.lena = int(aend) - self.starta
         if self.starta:
             self.lena += 1
         for x in xrange(self.lena):
             l = lr.readline()
             if l.startswith('---'):
                 lr.push(l)
                 break
             s = l[2:]
             if l.startswith('- ') or l.startswith('! '):
                 u = '-' + s
             elif l.startswith('  '):
                 u = ' ' + s
             else:
                 raise PatchError(_("bad hunk #%d old text line %d") %
                                  (self.number, x))
             self.a.append(u)
             self.hunk.append(u)
 
         l = lr.readline()
         if l.startswith('\ '):
             s = self.a[-1][:-1]
             self.a[-1] = s
             self.hunk[-1] = s
             l = lr.readline()
         m = contextdesc.match(l)
         if not m:
             raise PatchError(_("bad hunk #%d") % self.number)
         foo, self.startb, foo2, bend, foo3 = m.groups()
         self.startb = int(self.startb)
         if bend is None:
             bend = self.startb
         self.lenb = int(bend) - self.startb
         if self.startb:
             self.lenb += 1
         hunki = 1
         for x in xrange(self.lenb):
             l = lr.readline()
             if l.startswith('\ '):
                 s = self.b[-1][:-1]
                 self.b[-1] = s
                 self.hunk[hunki - 1] = s
                 continue
             if not l:
                 lr.push(l)
                 break
             s = l[2:]
             if l.startswith('+ ') or l.startswith('! '):
                 u = '+' + s
             elif l.startswith('  '):
                 u = ' ' + s
             elif len(self.b) == 0:
                 # this can happen when the hunk does not add any lines
                 lr.push(l)
                 break
             else:
                 raise PatchError(_("bad hunk #%d old text line %d") %
                                  (self.number, x))
             self.b.append(s)
             while True:
                 if hunki >= len(self.hunk):
                     h = ""
                 else:
                     h = self.hunk[hunki]
                 hunki += 1
                 if h == u:
                     break
                 elif h.startswith('-'):
                     continue
                 else:
                     self.hunk.insert(hunki - 1, u)
                     break
 
         if not self.a:
             # this happens when lines were only added to the hunk
             for x in self.hunk:
                 if x.startswith('-') or x.startswith(' '):
                     self.a.append(x)
         if not self.b:
             # this happens when lines were only deleted from the hunk
             for x in self.hunk:
                 if x.startswith('+') or x.startswith(' '):
                     self.b.append(x[1:])
         # @@ -start,len +start,len @@
         self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                                self.startb, self.lenb)
         self.hunk[0] = self.desc
 
     def fix_newline(self):
         diffhelpers.fix_newline(self.hunk, self.a, self.b)
 
     def complete(self):
         return len(self.a) == self.lena and len(self.b) == self.lenb
 
     def createfile(self):
         return self.starta == 0 and self.lena == 0 and self.create
 
     def rmfile(self):
         return self.startb == 0 and self.lenb == 0 and self.remove
 
     def fuzzit(self, l, fuzz, toponly):
         # this removes context lines from the top and bottom of list 'l'. It
         # checks the hunk to make sure only context lines are removed, and then
         # returns a new shortened list of lines.
         fuzz = min(fuzz, len(l)-1)
         if fuzz:
             top = 0
             bot = 0
             hlen = len(self.hunk)
             for x in xrange(hlen - 1):
                 # the hunk starts with the @@ line, so use x+1
                 if self.hunk[x + 1][0] == ' ':
                     top += 1
                 else:
                     break
             if not toponly:
                 for x in xrange(hlen - 1):
                     if self.hunk[hlen - bot - 1][0] == ' ':
                         bot += 1
                     else:
                         break
 
             # top and bot now count context in the hunk
             # adjust them if either one is short
             context = max(top, bot, 3)
             if bot < context:
                 bot = max(0, fuzz - (context - bot))
             else:
                 bot = min(fuzz, bot)
             if top < context:
                 top = max(0, fuzz - (context - top))
             else:
                 top = min(fuzz, top)
 
             return l[top:len(l)-bot]
         return l
 
     def old(self, fuzz=0, toponly=False):
         return self.fuzzit(self.a, fuzz, toponly)
 
     def new(self, fuzz=0, toponly=False):
         return self.fuzzit(self.b, fuzz, toponly)
 
 class binhunk:
     'A binary patch file. Only understands literals so far.'
     def __init__(self, gitpatch):
         self.gitpatch = gitpatch
         self.text = None
         self.hunk = ['GIT binary patch\n']
 
     def createfile(self):
         return self.gitpatch.op in ('ADD', 'RENAME', 'COPY')
 
     def rmfile(self):
         return self.gitpatch.op == 'DELETE'
 
     def complete(self):
         return self.text is not None
 
     def new(self):
         return [self.text]
 
     def extract(self, lr):
         line = lr.readline()
         self.hunk.append(line)
         while line and not line.startswith('literal '):
             line = lr.readline()
             self.hunk.append(line)
         if not line:
             raise PatchError(_('could not extract binary patch'))
         size = int(line[8:].rstrip())
         dec = []
         line = lr.readline()
         self.hunk.append(line)
         while len(line) > 1:
             l = line[0]
             if l <= 'Z' and l >= 'A':
                 l = ord(l) - ord('A') + 1
             else:
                 l = ord(l) - ord('a') + 27
             dec.append(base85.b85decode(line[1:-1])[:l])
             line = lr.readline()
             self.hunk.append(line)
         text = zlib.decompress(''.join(dec))
         if len(text) != size:
             raise PatchError(_('binary patch is %d bytes, not %d') %
                              len(text), size)
         self.text = text
 
 def parsefilename(str):
     # --- filename \t|space stuff
     s = str[4:].rstrip('\r\n')
     i = s.find('\t')
     if i < 0:
         i = s.find(' ')
         if i < 0:
             return s
     return s[:i]
 
 def pathstrip(path, strip):
     pathlen = len(path)
     i = 0
     if strip == 0:
         return '', path.rstrip()
     count = strip
     while count > 0:
         i = path.find('/', i)
         if i == -1:
             raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                              (count, strip, path))
         i += 1
         # consume '//' in the path
         while i < pathlen - 1 and path[i] == '/':
             i += 1
         count -= 1
     return path[:i].lstrip(), path[i:].rstrip()
 
 def selectfile(afile_orig, bfile_orig, hunk, strip):
     nulla = afile_orig == "/dev/null"
     nullb = bfile_orig == "/dev/null"
     abase, afile = pathstrip(afile_orig, strip)
     gooda = not nulla and os.path.lexists(afile)
     bbase, bfile = pathstrip(bfile_orig, strip)
     if afile == bfile:
         goodb = gooda
     else:
         goodb = not nullb and os.path.lexists(bfile)
     createfunc = hunk.createfile
     missing = not goodb and not gooda and not createfunc()
 
     # some diff programs apparently produce patches where the afile is
     # not /dev/null, but afile starts with bfile
     abasedir = afile[:afile.rfind('/') + 1]
     bbasedir = bfile[:bfile.rfind('/') + 1]
     if missing and abasedir == bbasedir and afile.startswith(bfile):
         # this isn't very pretty
         hunk.create = True
         if createfunc():
             missing = False
         else:
             hunk.create = False
 
     # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
     # diff is between a file and its backup. In this case, the original
     # file should be patched (see original mpatch code).
     isbackup = (abase == bbase and bfile.startswith(afile))
     fname = None
     if not missing:
         if gooda and goodb:
             fname = isbackup and afile or bfile
         elif gooda:
             fname = afile
 
     if not fname:
         if not nullb:
             fname = isbackup and afile or bfile
         elif not nulla:
             fname = afile
         else:
             raise PatchError(_("undefined source and destination files"))
 
     return fname, missing
 
 def scangitpatch(lr, firstline):
     """
     Git patches can emit:
     - rename a to b
     - change b
     - copy a to c
     - change c
 
     We cannot apply this sequence as-is, the renamed 'a' could not be
     found for it would have been renamed already. And we cannot copy
     from 'b' instead because 'b' would have been changed already. So
     we scan the git patch for copy and rename commands so we can
     perform the copies ahead of time.
     """
     pos = 0
     try:
         pos = lr.fp.tell()
         fp = lr.fp
     except IOError:
         fp = cStringIO.StringIO(lr.fp.read())
     gitlr = linereader(fp, lr.textmode)
     gitlr.push(firstline)
     gitpatches = readgitpatch(gitlr)
     fp.seek(pos)
     return gitpatches
 
 def iterhunks(ui, fp, sourcefile=None):
     """Read a patch and yield the following events:
     - ("file", afile, bfile, firsthunk): select a new target file.
     - ("hunk", hunk): a new hunk is ready to be applied, follows a
     "file" event.
     - ("git", gitchanges): current diff is in git format, gitchanges
     maps filenames to gitpatch records. Unique event.
     """
     changed = {}
     current_hunk = None
     afile = ""
     bfile = ""
     state = None
     hunknum = 0
     emitfile = False
     git = False
 
     # our states
     BFILE = 1
     context = None
     lr = linereader(fp)
     # gitworkdone is True if a git operation (copy, rename, ...) was
     # performed already for the current file. Useful when the file
     # section may have no hunk.
     gitworkdone = False
-    empty = None
 
     while True:
         newfile = newgitfile = False
         x = lr.readline()
         if not x:
             break
         if current_hunk:
             if x.startswith('\ '):
                 current_hunk.fix_newline()
             yield 'hunk', current_hunk
             current_hunk = None
-            empty = False
         if ((sourcefile or state == BFILE) and ((not context and x[0] == '@') or
             ((context is not False) and x.startswith('***************')))):
             try:
                 if context is None and x.startswith('***************'):
                     context = True
                 gpatch = changed.get(bfile)
                 create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD'
                 remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE'
                 current_hunk = hunk(x, hunknum + 1, lr, context, create, remove)
             except PatchError, err:
                 ui.debug(err)
                 current_hunk = None
                 continue
             hunknum += 1
             if emitfile:
                 emitfile = False
                 yield 'file', (afile, bfile, current_hunk)
-                empty = False
         elif state == BFILE and x.startswith('GIT binary patch'):
             current_hunk = binhunk(changed[bfile])
             hunknum += 1
             if emitfile:
                 emitfile = False
                 yield 'file', ('a/' + afile, 'b/' + bfile, current_hunk)
-                empty = False
             current_hunk.extract(lr)
         elif x.startswith('diff --git'):
             # check for git diff, scanning the whole patch file if needed
             m = gitre.match(x)
             gitworkdone = False
             if m:
                 afile, bfile = m.group(1, 2)
                 if not git:
                     git = True
                     gitpatches = scangitpatch(lr, x)
                     yield 'git', gitpatches
                     for gp in gitpatches:
                         changed[gp.path] = gp
                 # else error?
                 # copy/rename + modify should modify target, not source
                 gp = changed.get(bfile)
                 if gp and (gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD')
                            or gp.mode):
                     afile = bfile
                     gitworkdone = True
             newgitfile = True
         elif x.startswith('---'):
             # check for a unified diff
             l2 = lr.readline()
             if not l2.startswith('+++'):
                 lr.push(l2)
                 continue
             newfile = True
             context = False
             afile = parsefilename(x)
             bfile = parsefilename(l2)
         elif x.startswith('***'):
             # check for a context diff
             l2 = lr.readline()
             if not l2.startswith('---'):
                 lr.push(l2)
                 continue
             l3 = lr.readline()
             lr.push(l3)
             if not l3.startswith("***************"):
                 lr.push(l2)
                 continue
             newfile = True
             context = True
             afile = parsefilename(x)
             bfile = parsefilename(l2)
 
         if newfile:
-            if empty:
-                raise NoHunks
-            empty = not gitworkdone
             gitworkdone = False
 
         if newgitfile or newfile:
             emitfile = True
             state = BFILE
             hunknum = 0
         if current_hunk:
             if current_hunk.complete():
                 yield 'hunk', current_hunk
-                empty = False
             else:
                 raise PatchError(_("malformed patch %s %s") % (afile,
1101 current_hunk.desc))
1090 current_hunk.desc))
1102
1091
1103 if (empty is None and not gitworkdone) or empty:
1104 raise NoHunks
1105
1106
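The event stream documented in the iterhunks() docstring above is exactly what _applydiff() (further below) consumes. As a rough, non-authoritative sketch of a read-only consumer, assuming this module is importable as mercurial.patch and that mercurial.ui is available (both assumptions of the example, not part of the change itself):

    # Sketch: walk the events from iterhunks() without applying anything.
    from mercurial import ui as uimod
    from mercurial import patch as patchmod

    def describe(patchpath):
        myui = uimod.ui()
        fp = open(patchpath, 'rb')
        nhunks = 0
        try:
            for state, values in patchmod.iterhunks(myui, fp):
                if state == 'file':
                    afile, bfile, firsthunk = values
                    myui.write('file: %s -> %s\n' % (afile, bfile))
                elif state == 'hunk':
                    nhunks += 1      # one hunk ready for the current file
                elif state == 'git':
                    for gp in values:
                        myui.write('git: %s %s\n' % (gp.op, gp.path))
        finally:
            fp.close()
        myui.write('%d hunks in total\n' % nhunks)
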
1107 def applydiff(ui, fp, changed, strip=1, sourcefile=None, eolmode='strict'):
1092 def applydiff(ui, fp, changed, strip=1, sourcefile=None, eolmode='strict'):
1108 """Reads a patch from fp and tries to apply it.
1093 """Reads a patch from fp and tries to apply it.
1109
1094
1110 The dict 'changed' is filled in with all of the filenames changed
1095 The dict 'changed' is filled in with all of the filenames changed
1111 by the patch. Returns 0 for a clean patch, -1 if any rejects were
1096 by the patch. Returns 0 for a clean patch, -1 if any rejects were
1112 found and 1 if there was any fuzz.
1097 found and 1 if there was any fuzz.
1113
1098
1114 If 'eolmode' is 'strict', the patch content and patched file are
1099 If 'eolmode' is 'strict', the patch content and patched file are
1115 read in binary mode. Otherwise, line endings are ignored when
1100 read in binary mode. Otherwise, line endings are ignored when
 1116 patching, then normalized according to 'eolmode'.
 1101 patching, then normalized according to 'eolmode'.
1117
1102
1118 Callers probably want to call 'cmdutil.updatedir' after this to
1103 Callers probably want to call 'cmdutil.updatedir' after this to
1119 apply certain categories of changes not done by this function.
1104 apply certain categories of changes not done by this function.
1120 """
1105 """
1121 return _applydiff(
1106 return _applydiff(
1122 ui, fp, patchfile, copyfile,
1107 ui, fp, patchfile, copyfile,
1123 changed, strip=strip, sourcefile=sourcefile, eolmode=eolmode)
1108 changed, strip=strip, sourcefile=sourcefile, eolmode=eolmode)
1124
1109
1125
1110
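applydiff() can also be driven directly; a minimal sketch that maps its return value back to the contract above (0 clean, 1 fuzz, -1 rejects), applying relative to the current directory (the patch path and surrounding setup are placeholders):

    from mercurial import ui as uimod
    from mercurial import patch as patchmod

    def applyreport(patchpath):
        myui = uimod.ui()
        changed = {}                 # filled in with the touched filenames
        fp = open(patchpath, 'rb')
        try:
            ret = patchmod.applydiff(myui, fp, changed, strip=1,
                                     eolmode='strict')
        finally:
            fp.close()
        if ret == -1:
            myui.warn('rejects written for some files\n')
        elif ret == 1:
            myui.status('applied, but with fuzz\n')
        else:
            myui.status('applied cleanly\n')
        return sorted(changed)
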
1126 def _applydiff(ui, fp, patcher, copyfn, changed, strip=1,
1111 def _applydiff(ui, fp, patcher, copyfn, changed, strip=1,
1127 sourcefile=None, eolmode='strict'):
1112 sourcefile=None, eolmode='strict'):
1128 rejects = 0
1113 rejects = 0
1129 err = 0
1114 err = 0
1130 current_file = None
1115 current_file = None
1131 cwd = os.getcwd()
1116 cwd = os.getcwd()
1132 opener = util.opener(cwd)
1117 opener = util.opener(cwd)
1133
1118
1134 def closefile():
1119 def closefile():
1135 if not current_file:
1120 if not current_file:
1136 return 0
1121 return 0
1137 if current_file.dirty:
1122 if current_file.dirty:
1138 current_file.writelines(current_file.fname, current_file.lines)
1123 current_file.writelines(current_file.fname, current_file.lines)
1139 current_file.write_rej()
1124 current_file.write_rej()
1140 return len(current_file.rej)
1125 return len(current_file.rej)
1141
1126
1142 for state, values in iterhunks(ui, fp, sourcefile):
1127 for state, values in iterhunks(ui, fp, sourcefile):
1143 if state == 'hunk':
1128 if state == 'hunk':
1144 if not current_file:
1129 if not current_file:
1145 continue
1130 continue
1146 ret = current_file.apply(values)
1131 ret = current_file.apply(values)
1147 if ret >= 0:
1132 if ret >= 0:
1148 changed.setdefault(current_file.fname, None)
1133 changed.setdefault(current_file.fname, None)
1149 if ret > 0:
1134 if ret > 0:
1150 err = 1
1135 err = 1
1151 elif state == 'file':
1136 elif state == 'file':
1152 rejects += closefile()
1137 rejects += closefile()
1153 afile, bfile, first_hunk = values
1138 afile, bfile, first_hunk = values
1154 try:
1139 try:
1155 if sourcefile:
1140 if sourcefile:
1156 current_file = patcher(ui, sourcefile, opener,
1141 current_file = patcher(ui, sourcefile, opener,
1157 eolmode=eolmode)
1142 eolmode=eolmode)
1158 else:
1143 else:
1159 current_file, missing = selectfile(afile, bfile,
1144 current_file, missing = selectfile(afile, bfile,
1160 first_hunk, strip)
1145 first_hunk, strip)
1161 current_file = patcher(ui, current_file, opener,
1146 current_file = patcher(ui, current_file, opener,
1162 missing=missing, eolmode=eolmode)
1147 missing=missing, eolmode=eolmode)
1163 except PatchError, err:
1148 except PatchError, err:
1164 ui.warn(str(err) + '\n')
1149 ui.warn(str(err) + '\n')
1165 current_file = None
1150 current_file = None
1166 rejects += 1
1151 rejects += 1
1167 continue
1152 continue
1168 elif state == 'git':
1153 elif state == 'git':
1169 for gp in values:
1154 for gp in values:
1170 gp.path = pathstrip(gp.path, strip - 1)[1]
1155 gp.path = pathstrip(gp.path, strip - 1)[1]
1171 if gp.oldpath:
1156 if gp.oldpath:
1172 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1157 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1173 # Binary patches really overwrite target files, copying them
1158 # Binary patches really overwrite target files, copying them
 1174 # will just make it fail with "target file exists"
 1159 # will just make it fail with "target file exists"
1175 if gp.op in ('COPY', 'RENAME') and not gp.binary:
1160 if gp.op in ('COPY', 'RENAME') and not gp.binary:
1176 copyfn(gp.oldpath, gp.path, cwd)
1161 copyfn(gp.oldpath, gp.path, cwd)
1177 changed[gp.path] = gp
1162 changed[gp.path] = gp
1178 else:
1163 else:
1179 raise util.Abort(_('unsupported parser state: %s') % state)
1164 raise util.Abort(_('unsupported parser state: %s') % state)
1180
1165
1181 rejects += closefile()
1166 rejects += closefile()
1182
1167
1183 if rejects:
1168 if rejects:
1184 return -1
1169 return -1
1185 return err
1170 return err
1186
1171
1187 def externalpatch(patcher, args, patchname, ui, strip, cwd, files):
1172 def externalpatch(patcher, args, patchname, ui, strip, cwd, files):
1188 """use <patcher> to apply <patchname> to the working directory.
1173 """use <patcher> to apply <patchname> to the working directory.
1189 returns whether patch was applied with fuzz factor."""
1174 returns whether patch was applied with fuzz factor."""
1190
1175
1191 fuzz = False
1176 fuzz = False
1192 if cwd:
1177 if cwd:
1193 args.append('-d %s' % util.shellquote(cwd))
1178 args.append('-d %s' % util.shellquote(cwd))
1194 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1179 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1195 util.shellquote(patchname)))
1180 util.shellquote(patchname)))
1196
1181
1197 for line in fp:
1182 for line in fp:
1198 line = line.rstrip()
1183 line = line.rstrip()
1199 ui.note(line + '\n')
1184 ui.note(line + '\n')
1200 if line.startswith('patching file '):
1185 if line.startswith('patching file '):
1201 pf = util.parse_patch_output(line)
1186 pf = util.parse_patch_output(line)
1202 printed_file = False
1187 printed_file = False
1203 files.setdefault(pf, None)
1188 files.setdefault(pf, None)
1204 elif line.find('with fuzz') >= 0:
1189 elif line.find('with fuzz') >= 0:
1205 fuzz = True
1190 fuzz = True
1206 if not printed_file:
1191 if not printed_file:
1207 ui.warn(pf + '\n')
1192 ui.warn(pf + '\n')
1208 printed_file = True
1193 printed_file = True
1209 ui.warn(line + '\n')
1194 ui.warn(line + '\n')
1210 elif line.find('saving rejects to file') >= 0:
1195 elif line.find('saving rejects to file') >= 0:
1211 ui.warn(line + '\n')
1196 ui.warn(line + '\n')
1212 elif line.find('FAILED') >= 0:
1197 elif line.find('FAILED') >= 0:
1213 if not printed_file:
1198 if not printed_file:
1214 ui.warn(pf + '\n')
1199 ui.warn(pf + '\n')
1215 printed_file = True
1200 printed_file = True
1216 ui.warn(line + '\n')
1201 ui.warn(line + '\n')
1217 code = fp.close()
1202 code = fp.close()
1218 if code:
1203 if code:
1219 raise PatchError(_("patch command failed: %s") %
1204 raise PatchError(_("patch command failed: %s") %
1220 util.explain_exit(code)[0])
1205 util.explain_exit(code)[0])
1221 return fuzz
1206 return fuzz
1222
1207
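The loop above keys off a handful of substrings in the external program's output. A self-contained sketch of those same checks against canned lines (the sample lines are illustrative, written in the style of GNU patch output rather than captured from a real run, and the filename extraction is simplified compared to util.parse_patch_output):

    sample = [
        'patching file foo.c',
        'Hunk #1 succeeded at 12 with fuzz 2 (offset 3 lines).',
        'Hunk #2 FAILED at 40.',
        '1 out of 2 hunks FAILED -- saving rejects to file foo.c.rej',
    ]
    fuzz = False
    for line in sample:
        line = line.rstrip()
        if line.startswith('patching file '):
            print 'target:', line[len('patching file '):]
        elif line.find('with fuzz') >= 0:
            fuzz = True                      # reported back to the caller
        elif line.find('saving rejects to file') >= 0:
            print 'rejects:', line
        elif line.find('FAILED') >= 0:
            print 'failed hunk:', line
    print 'fuzz:', fuzz
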
1223 def internalpatch(patchobj, ui, strip, cwd, files=None, eolmode='strict'):
1208 def internalpatch(patchobj, ui, strip, cwd, files=None, eolmode='strict'):
1224 """use builtin patch to apply <patchobj> to the working directory.
1209 """use builtin patch to apply <patchobj> to the working directory.
1225 returns whether patch was applied with fuzz factor."""
1210 returns whether patch was applied with fuzz factor."""
1226
1211
1227 if files is None:
1212 if files is None:
1228 files = {}
1213 files = {}
1229 if eolmode is None:
1214 if eolmode is None:
1230 eolmode = ui.config('patch', 'eol', 'strict')
1215 eolmode = ui.config('patch', 'eol', 'strict')
1231 if eolmode.lower() not in eolmodes:
1216 if eolmode.lower() not in eolmodes:
1232 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1217 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1233 eolmode = eolmode.lower()
1218 eolmode = eolmode.lower()
1234
1219
1235 try:
1220 try:
1236 fp = open(patchobj, 'rb')
1221 fp = open(patchobj, 'rb')
1237 except TypeError:
1222 except TypeError:
1238 fp = patchobj
1223 fp = patchobj
1239 if cwd:
1224 if cwd:
1240 curdir = os.getcwd()
1225 curdir = os.getcwd()
1241 os.chdir(cwd)
1226 os.chdir(cwd)
1242 try:
1227 try:
1243 ret = applydiff(ui, fp, files, strip=strip, eolmode=eolmode)
1228 ret = applydiff(ui, fp, files, strip=strip, eolmode=eolmode)
1244 finally:
1229 finally:
1245 if cwd:
1230 if cwd:
1246 os.chdir(curdir)
1231 os.chdir(curdir)
1247 if fp != patchobj:
1232 if fp != patchobj:
1248 fp.close()
1233 fp.close()
1249 if ret < 0:
1234 if ret < 0:
1250 raise PatchError
1235 raise PatchError
1251 return ret > 0
1236 return ret > 0
1252
1237
1253 def patch(patchname, ui, strip=1, cwd=None, files=None, eolmode='strict'):
1238 def patch(patchname, ui, strip=1, cwd=None, files=None, eolmode='strict'):
1254 """Apply <patchname> to the working directory.
1239 """Apply <patchname> to the working directory.
1255
1240
1256 'eolmode' specifies how end of lines should be handled. It can be:
1241 'eolmode' specifies how end of lines should be handled. It can be:
1257 - 'strict': inputs are read in binary mode, EOLs are preserved
1242 - 'strict': inputs are read in binary mode, EOLs are preserved
1258 - 'crlf': EOLs are ignored when patching and reset to CRLF
1243 - 'crlf': EOLs are ignored when patching and reset to CRLF
1259 - 'lf': EOLs are ignored when patching and reset to LF
1244 - 'lf': EOLs are ignored when patching and reset to LF
1260 - None: get it from user settings, default to 'strict'
1245 - None: get it from user settings, default to 'strict'
1261 'eolmode' is ignored when using an external patcher program.
1246 'eolmode' is ignored when using an external patcher program.
1262
1247
1263 Returns whether patch was applied with fuzz factor.
1248 Returns whether patch was applied with fuzz factor.
1264 """
1249 """
1265 patcher = ui.config('ui', 'patch')
1250 patcher = ui.config('ui', 'patch')
1266 args = []
1251 args = []
1267 if files is None:
1252 if files is None:
1268 files = {}
1253 files = {}
1269 try:
1254 try:
1270 if patcher:
1255 if patcher:
1271 return externalpatch(patcher, args, patchname, ui, strip, cwd,
1256 return externalpatch(patcher, args, patchname, ui, strip, cwd,
1272 files)
1257 files)
1273 else:
1258 return internalpatch(patchname, ui, strip, cwd, files, eolmode)
1274 try:
1275 return internalpatch(patchname, ui, strip, cwd, files, eolmode)
1276 except NoHunks:
1277 ui.warn(_('internal patcher failed\n'
1278 'please report details to '
1279 'http://mercurial.selenic.com/bts/\n'
1280 'or mercurial@selenic.com\n'))
1281 patcher = (util.find_exe('gpatch') or util.find_exe('patch')
1282 or 'patch')
1283 ui.debug('no valid hunks found; trying with %r instead\n' %
1284 patcher)
1285 if util.needbinarypatch():
1286 args.append('--binary')
1287 return externalpatch(patcher, args, patchname, ui, strip, cwd,
1288 files)
1289 except PatchError, err:
1259 except PatchError, err:
1290 s = str(err)
1260 s = str(err)
1291 if s:
1261 if s:
1292 raise util.Abort(s)
1262 raise util.Abort(s)
1293 else:
1263 else:
1294 raise util.Abort(_('patch failed to apply'))
1264 raise util.Abort(_('patch failed to apply'))
1295
1265
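patch() is the usual entry point; a sketch of calling it with an explicit eolmode, assuming a plain ui (if ui.patch were set in the configuration, the call would be routed through externalpatch() instead). The patch name and working-directory path are placeholders:

    from mercurial import ui as uimod
    from mercurial import util
    from mercurial import patch as patchmod

    myui = uimod.ui()
    files = {}
    try:
        fuzz = patchmod.patch('fix-foo.diff', myui, strip=1,
                              cwd='/path/to/wc', files=files, eolmode='lf')
    except util.Abort, inst:
        myui.warn('patch failed: %s\n' % inst)
    else:
        if fuzz:
            myui.status('applied with fuzz\n')
        myui.status('files touched: %s\n' % ', '.join(sorted(files)))
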
1296 def b85diff(to, tn):
1266 def b85diff(to, tn):
1297 '''print base85-encoded binary diff'''
1267 '''print base85-encoded binary diff'''
1298 def gitindex(text):
1268 def gitindex(text):
1299 if not text:
1269 if not text:
1300 return hex(nullid)
1270 return hex(nullid)
1301 l = len(text)
1271 l = len(text)
1302 s = util.sha1('blob %d\0' % l)
1272 s = util.sha1('blob %d\0' % l)
1303 s.update(text)
1273 s.update(text)
1304 return s.hexdigest()
1274 return s.hexdigest()
1305
1275
1306 def fmtline(line):
1276 def fmtline(line):
1307 l = len(line)
1277 l = len(line)
1308 if l <= 26:
1278 if l <= 26:
1309 l = chr(ord('A') + l - 1)
1279 l = chr(ord('A') + l - 1)
1310 else:
1280 else:
1311 l = chr(l - 26 + ord('a') - 1)
1281 l = chr(l - 26 + ord('a') - 1)
1312 return '%c%s\n' % (l, base85.b85encode(line, True))
1282 return '%c%s\n' % (l, base85.b85encode(line, True))
1313
1283
1314 def chunk(text, csize=52):
1284 def chunk(text, csize=52):
1315 l = len(text)
1285 l = len(text)
1316 i = 0
1286 i = 0
1317 while i < l:
1287 while i < l:
1318 yield text[i:i + csize]
1288 yield text[i:i + csize]
1319 i += csize
1289 i += csize
1320
1290
1321 tohash = gitindex(to)
1291 tohash = gitindex(to)
1322 tnhash = gitindex(tn)
1292 tnhash = gitindex(tn)
1323 if tohash == tnhash:
1293 if tohash == tnhash:
1324 return ""
1294 return ""
1325
1295
1326 # TODO: deltas
1296 # TODO: deltas
1327 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1297 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1328 (tohash, tnhash, len(tn))]
1298 (tohash, tnhash, len(tn))]
1329 for l in chunk(zlib.compress(tn)):
1299 for l in chunk(zlib.compress(tn)):
1330 ret.append(fmtline(l))
1300 ret.append(fmtline(l))
1331 ret.append('\n')
1301 ret.append('\n')
1332 return ''.join(ret)
1302 return ''.join(ret)
1333
1303
1334 class GitDiffRequired(Exception):
1304 class GitDiffRequired(Exception):
1335 pass
1305 pass
1336
1306
1337 def diffopts(ui, opts=None, untrusted=False):
1307 def diffopts(ui, opts=None, untrusted=False):
1338 def get(key, name=None, getter=ui.configbool):
1308 def get(key, name=None, getter=ui.configbool):
1339 return ((opts and opts.get(key)) or
1309 return ((opts and opts.get(key)) or
1340 getter('diff', name or key, None, untrusted=untrusted))
1310 getter('diff', name or key, None, untrusted=untrusted))
1341 return mdiff.diffopts(
1311 return mdiff.diffopts(
1342 text=opts and opts.get('text'),
1312 text=opts and opts.get('text'),
1343 git=get('git'),
1313 git=get('git'),
1344 nodates=get('nodates'),
1314 nodates=get('nodates'),
1345 showfunc=get('show_function', 'showfunc'),
1315 showfunc=get('show_function', 'showfunc'),
1346 ignorews=get('ignore_all_space', 'ignorews'),
1316 ignorews=get('ignore_all_space', 'ignorews'),
1347 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1317 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1348 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1318 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1349 context=get('unified', getter=ui.config))
1319 context=get('unified', getter=ui.config))
1350
1320
1351 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1321 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1352 losedatafn=None, prefix=''):
1322 losedatafn=None, prefix=''):
1353 '''yields diff of changes to files between two nodes, or node and
1323 '''yields diff of changes to files between two nodes, or node and
1354 working directory.
1324 working directory.
1355
1325
1356 if node1 is None, use first dirstate parent instead.
1326 if node1 is None, use first dirstate parent instead.
1357 if node2 is None, compare node1 with working directory.
1327 if node2 is None, compare node1 with working directory.
1358
1328
1359 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1329 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1360 every time some change cannot be represented with the current
1330 every time some change cannot be represented with the current
1361 patch format. Return False to upgrade to git patch format, True to
1331 patch format. Return False to upgrade to git patch format, True to
1362 accept the loss or raise an exception to abort the diff. It is
1332 accept the loss or raise an exception to abort the diff. It is
1363 called with the name of current file being diffed as 'fn'. If set
1333 called with the name of current file being diffed as 'fn'. If set
1364 to None, patches will always be upgraded to git format when
1334 to None, patches will always be upgraded to git format when
1365 necessary.
1335 necessary.
1366
1336
1367 prefix is a filename prefix that is prepended to all filenames on
1337 prefix is a filename prefix that is prepended to all filenames on
1368 display (used for subrepos).
1338 display (used for subrepos).
1369 '''
1339 '''
1370
1340
1371 if opts is None:
1341 if opts is None:
1372 opts = mdiff.defaultopts
1342 opts = mdiff.defaultopts
1373
1343
1374 if not node1 and not node2:
1344 if not node1 and not node2:
1375 node1 = repo.dirstate.parents()[0]
1345 node1 = repo.dirstate.parents()[0]
1376
1346
1377 def lrugetfilectx():
1347 def lrugetfilectx():
1378 cache = {}
1348 cache = {}
1379 order = []
1349 order = []
1380 def getfilectx(f, ctx):
1350 def getfilectx(f, ctx):
1381 fctx = ctx.filectx(f, filelog=cache.get(f))
1351 fctx = ctx.filectx(f, filelog=cache.get(f))
1382 if f not in cache:
1352 if f not in cache:
1383 if len(cache) > 20:
1353 if len(cache) > 20:
1384 del cache[order.pop(0)]
1354 del cache[order.pop(0)]
1385 cache[f] = fctx.filelog()
1355 cache[f] = fctx.filelog()
1386 else:
1356 else:
1387 order.remove(f)
1357 order.remove(f)
1388 order.append(f)
1358 order.append(f)
1389 return fctx
1359 return fctx
1390 return getfilectx
1360 return getfilectx
1391 getfilectx = lrugetfilectx()
1361 getfilectx = lrugetfilectx()
1392
1362
1393 ctx1 = repo[node1]
1363 ctx1 = repo[node1]
1394 ctx2 = repo[node2]
1364 ctx2 = repo[node2]
1395
1365
1396 if not changes:
1366 if not changes:
1397 changes = repo.status(ctx1, ctx2, match=match)
1367 changes = repo.status(ctx1, ctx2, match=match)
1398 modified, added, removed = changes[:3]
1368 modified, added, removed = changes[:3]
1399
1369
1400 if not modified and not added and not removed:
1370 if not modified and not added and not removed:
1401 return []
1371 return []
1402
1372
1403 revs = None
1373 revs = None
1404 if not repo.ui.quiet:
1374 if not repo.ui.quiet:
1405 hexfunc = repo.ui.debugflag and hex or short
1375 hexfunc = repo.ui.debugflag and hex or short
1406 revs = [hexfunc(node) for node in [node1, node2] if node]
1376 revs = [hexfunc(node) for node in [node1, node2] if node]
1407
1377
1408 copy = {}
1378 copy = {}
1409 if opts.git or opts.upgrade:
1379 if opts.git or opts.upgrade:
1410 copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]
1380 copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]
1411
1381
1412 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1382 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1413 modified, added, removed, copy, getfilectx, opts, losedata, prefix)
1383 modified, added, removed, copy, getfilectx, opts, losedata, prefix)
1414 if opts.upgrade and not opts.git:
1384 if opts.upgrade and not opts.git:
1415 try:
1385 try:
1416 def losedata(fn):
1386 def losedata(fn):
1417 if not losedatafn or not losedatafn(fn=fn):
1387 if not losedatafn or not losedatafn(fn=fn):
1418 raise GitDiffRequired()
1388 raise GitDiffRequired()
1419 # Buffer the whole output until we are sure it can be generated
1389 # Buffer the whole output until we are sure it can be generated
1420 return list(difffn(opts.copy(git=False), losedata))
1390 return list(difffn(opts.copy(git=False), losedata))
1421 except GitDiffRequired:
1391 except GitDiffRequired:
1422 return difffn(opts.copy(git=True), None)
1392 return difffn(opts.copy(git=True), None)
1423 else:
1393 else:
1424 return difffn(opts, None)
1394 return difffn(opts, None)
1425
1395
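A sketch of consuming the diff() generator, assuming the repository is opened elsewhere (here via mercurial.hg.repository with a placeholder path) and requesting git-style output through diffopts():

    import sys
    from mercurial import ui as uimod, hg
    from mercurial import patch as patchmod

    myui = uimod.ui()
    repo = hg.repository(myui, '/path/to/repo')    # placeholder path
    opts = patchmod.diffopts(myui, {'git': True})
    # node1/node2 default to the first dirstate parent and the working dir
    for chunk in patchmod.diff(repo, opts=opts):
        sys.stdout.write(chunk)

When opts.upgrade is set and opts.git is not, the losedatafn callback described in the docstring is consulted each time a plain diff would lose information; returning False from it regenerates the whole output in git format.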
1426 def difflabel(func, *args, **kw):
1396 def difflabel(func, *args, **kw):
1427 '''yields 2-tuples of (output, label) based on the output of func()'''
1397 '''yields 2-tuples of (output, label) based on the output of func()'''
1428 prefixes = [('diff', 'diff.diffline'),
1398 prefixes = [('diff', 'diff.diffline'),
1429 ('copy', 'diff.extended'),
1399 ('copy', 'diff.extended'),
1430 ('rename', 'diff.extended'),
1400 ('rename', 'diff.extended'),
1431 ('old', 'diff.extended'),
1401 ('old', 'diff.extended'),
1432 ('new', 'diff.extended'),
1402 ('new', 'diff.extended'),
1433 ('deleted', 'diff.extended'),
1403 ('deleted', 'diff.extended'),
1434 ('---', 'diff.file_a'),
1404 ('---', 'diff.file_a'),
1435 ('+++', 'diff.file_b'),
1405 ('+++', 'diff.file_b'),
1436 ('@@', 'diff.hunk'),
1406 ('@@', 'diff.hunk'),
1437 ('-', 'diff.deleted'),
1407 ('-', 'diff.deleted'),
1438 ('+', 'diff.inserted')]
1408 ('+', 'diff.inserted')]
1439
1409
1440 for chunk in func(*args, **kw):
1410 for chunk in func(*args, **kw):
1441 lines = chunk.split('\n')
1411 lines = chunk.split('\n')
1442 for i, line in enumerate(lines):
1412 for i, line in enumerate(lines):
1443 if i != 0:
1413 if i != 0:
1444 yield ('\n', '')
1414 yield ('\n', '')
1445 stripline = line
1415 stripline = line
1446 if line and line[0] in '+-':
1416 if line and line[0] in '+-':
1447 # highlight trailing whitespace, but only in changed lines
1417 # highlight trailing whitespace, but only in changed lines
1448 stripline = line.rstrip()
1418 stripline = line.rstrip()
1449 for prefix, label in prefixes:
1419 for prefix, label in prefixes:
1450 if stripline.startswith(prefix):
1420 if stripline.startswith(prefix):
1451 yield (stripline, label)
1421 yield (stripline, label)
1452 break
1422 break
1453 else:
1423 else:
1454 yield (line, '')
1424 yield (line, '')
1455 if line != stripline:
1425 if line != stripline:
1456 yield (line[len(stripline):], 'diff.trailingwhitespace')
1426 yield (line[len(stripline):], 'diff.trailingwhitespace')
1457
1427
1458 def diffui(*args, **kw):
1428 def diffui(*args, **kw):
1459 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1429 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1460 return difflabel(diff, *args, **kw)
1430 return difflabel(diff, *args, **kw)
1461
1431
1462
1432
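difflabel()/diffui() pair every output line with a label so that label-aware uis (color, HTML) can decorate it. A sketch under the same assumptions as the previous example (placeholder repository path, a ui.write that accepts labels):

    from mercurial import ui as uimod, hg
    from mercurial import patch as patchmod

    myui = uimod.ui()
    repo = hg.repository(myui, '/path/to/repo')    # placeholder path
    for output, label in patchmod.diffui(repo, opts=patchmod.diffopts(myui)):
        myui.write(output, label=label)
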
1463 def _addmodehdr(header, omode, nmode):
1433 def _addmodehdr(header, omode, nmode):
1464 if omode != nmode:
1434 if omode != nmode:
1465 header.append('old mode %s\n' % omode)
1435 header.append('old mode %s\n' % omode)
1466 header.append('new mode %s\n' % nmode)
1436 header.append('new mode %s\n' % nmode)
1467
1437
1468 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1438 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1469 copy, getfilectx, opts, losedatafn, prefix):
1439 copy, getfilectx, opts, losedatafn, prefix):
1470
1440
1471 def join(f):
1441 def join(f):
1472 return os.path.join(prefix, f)
1442 return os.path.join(prefix, f)
1473
1443
1474 date1 = util.datestr(ctx1.date())
1444 date1 = util.datestr(ctx1.date())
1475 man1 = ctx1.manifest()
1445 man1 = ctx1.manifest()
1476
1446
1477 gone = set()
1447 gone = set()
1478 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1448 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1479
1449
1480 copyto = dict([(v, k) for k, v in copy.items()])
1450 copyto = dict([(v, k) for k, v in copy.items()])
1481
1451
1482 if opts.git:
1452 if opts.git:
1483 revs = None
1453 revs = None
1484
1454
1485 for f in sorted(modified + added + removed):
1455 for f in sorted(modified + added + removed):
1486 to = None
1456 to = None
1487 tn = None
1457 tn = None
1488 dodiff = True
1458 dodiff = True
1489 header = []
1459 header = []
1490 if f in man1:
1460 if f in man1:
1491 to = getfilectx(f, ctx1).data()
1461 to = getfilectx(f, ctx1).data()
1492 if f not in removed:
1462 if f not in removed:
1493 tn = getfilectx(f, ctx2).data()
1463 tn = getfilectx(f, ctx2).data()
1494 a, b = f, f
1464 a, b = f, f
1495 if opts.git or losedatafn:
1465 if opts.git or losedatafn:
1496 if f in added:
1466 if f in added:
1497 mode = gitmode[ctx2.flags(f)]
1467 mode = gitmode[ctx2.flags(f)]
1498 if f in copy or f in copyto:
1468 if f in copy or f in copyto:
1499 if opts.git:
1469 if opts.git:
1500 if f in copy:
1470 if f in copy:
1501 a = copy[f]
1471 a = copy[f]
1502 else:
1472 else:
1503 a = copyto[f]
1473 a = copyto[f]
1504 omode = gitmode[man1.flags(a)]
1474 omode = gitmode[man1.flags(a)]
1505 _addmodehdr(header, omode, mode)
1475 _addmodehdr(header, omode, mode)
1506 if a in removed and a not in gone:
1476 if a in removed and a not in gone:
1507 op = 'rename'
1477 op = 'rename'
1508 gone.add(a)
1478 gone.add(a)
1509 else:
1479 else:
1510 op = 'copy'
1480 op = 'copy'
1511 header.append('%s from %s\n' % (op, join(a)))
1481 header.append('%s from %s\n' % (op, join(a)))
1512 header.append('%s to %s\n' % (op, join(f)))
1482 header.append('%s to %s\n' % (op, join(f)))
1513 to = getfilectx(a, ctx1).data()
1483 to = getfilectx(a, ctx1).data()
1514 else:
1484 else:
1515 losedatafn(f)
1485 losedatafn(f)
1516 else:
1486 else:
1517 if opts.git:
1487 if opts.git:
1518 header.append('new file mode %s\n' % mode)
1488 header.append('new file mode %s\n' % mode)
1519 elif ctx2.flags(f):
1489 elif ctx2.flags(f):
1520 losedatafn(f)
1490 losedatafn(f)
1521 # In theory, if tn was copied or renamed we should check
1491 # In theory, if tn was copied or renamed we should check
1522 # if the source is binary too but the copy record already
1492 # if the source is binary too but the copy record already
1523 # forces git mode.
1493 # forces git mode.
1524 if util.binary(tn):
1494 if util.binary(tn):
1525 if opts.git:
1495 if opts.git:
1526 dodiff = 'binary'
1496 dodiff = 'binary'
1527 else:
1497 else:
1528 losedatafn(f)
1498 losedatafn(f)
1529 if not opts.git and not tn:
1499 if not opts.git and not tn:
1530 # regular diffs cannot represent new empty file
1500 # regular diffs cannot represent new empty file
1531 losedatafn(f)
1501 losedatafn(f)
1532 elif f in removed:
1502 elif f in removed:
1533 if opts.git:
1503 if opts.git:
1534 # have we already reported a copy above?
1504 # have we already reported a copy above?
1535 if ((f in copy and copy[f] in added
1505 if ((f in copy and copy[f] in added
1536 and copyto[copy[f]] == f) or
1506 and copyto[copy[f]] == f) or
1537 (f in copyto and copyto[f] in added
1507 (f in copyto and copyto[f] in added
1538 and copy[copyto[f]] == f)):
1508 and copy[copyto[f]] == f)):
1539 dodiff = False
1509 dodiff = False
1540 else:
1510 else:
1541 header.append('deleted file mode %s\n' %
1511 header.append('deleted file mode %s\n' %
1542 gitmode[man1.flags(f)])
1512 gitmode[man1.flags(f)])
1543 elif not to or util.binary(to):
1513 elif not to or util.binary(to):
1544 # regular diffs cannot represent empty file deletion
1514 # regular diffs cannot represent empty file deletion
1545 losedatafn(f)
1515 losedatafn(f)
1546 else:
1516 else:
1547 oflag = man1.flags(f)
1517 oflag = man1.flags(f)
1548 nflag = ctx2.flags(f)
1518 nflag = ctx2.flags(f)
1549 binary = util.binary(to) or util.binary(tn)
1519 binary = util.binary(to) or util.binary(tn)
1550 if opts.git:
1520 if opts.git:
1551 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1521 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1552 if binary:
1522 if binary:
1553 dodiff = 'binary'
1523 dodiff = 'binary'
1554 elif binary or nflag != oflag:
1524 elif binary or nflag != oflag:
1555 losedatafn(f)
1525 losedatafn(f)
1556 if opts.git:
1526 if opts.git:
1557 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1527 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1558
1528
1559 if dodiff:
1529 if dodiff:
1560 if dodiff == 'binary':
1530 if dodiff == 'binary':
1561 text = b85diff(to, tn)
1531 text = b85diff(to, tn)
1562 else:
1532 else:
1563 text = mdiff.unidiff(to, date1,
1533 text = mdiff.unidiff(to, date1,
1564 # ctx2 date may be dynamic
1534 # ctx2 date may be dynamic
1565 tn, util.datestr(ctx2.date()),
1535 tn, util.datestr(ctx2.date()),
1566 join(a), join(b), revs, opts=opts)
1536 join(a), join(b), revs, opts=opts)
1567 if header and (text or len(header) > 1):
1537 if header and (text or len(header) > 1):
1568 yield ''.join(header)
1538 yield ''.join(header)
1569 if text:
1539 if text:
1570 yield text
1540 yield text
1571
1541
1572 def diffstatdata(lines):
1542 def diffstatdata(lines):
1573 filename, adds, removes = None, 0, 0
1543 filename, adds, removes = None, 0, 0
1574 for line in lines:
1544 for line in lines:
1575 if line.startswith('diff'):
1545 if line.startswith('diff'):
1576 if filename:
1546 if filename:
1577 isbinary = adds == 0 and removes == 0
1547 isbinary = adds == 0 and removes == 0
1578 yield (filename, adds, removes, isbinary)
1548 yield (filename, adds, removes, isbinary)
1579 # set numbers to 0 anyway when starting new file
1549 # set numbers to 0 anyway when starting new file
1580 adds, removes = 0, 0
1550 adds, removes = 0, 0
1581 if line.startswith('diff --git'):
1551 if line.startswith('diff --git'):
1582 filename = gitre.search(line).group(1)
1552 filename = gitre.search(line).group(1)
1583 else:
1553 else:
1584 # format: "diff -r ... -r ... filename"
1554 # format: "diff -r ... -r ... filename"
1585 filename = line.split(None, 5)[-1]
1555 filename = line.split(None, 5)[-1]
1586 elif line.startswith('+') and not line.startswith('+++'):
1556 elif line.startswith('+') and not line.startswith('+++'):
1587 adds += 1
1557 adds += 1
1588 elif line.startswith('-') and not line.startswith('---'):
1558 elif line.startswith('-') and not line.startswith('---'):
1589 removes += 1
1559 removes += 1
1590 if filename:
1560 if filename:
1591 isbinary = adds == 0 and removes == 0
1561 isbinary = adds == 0 and removes == 0
1592 yield (filename, adds, removes, isbinary)
1562 yield (filename, adds, removes, isbinary)
1593
1563
1594 def diffstat(lines, width=80, git=False):
1564 def diffstat(lines, width=80, git=False):
1595 output = []
1565 output = []
1596 stats = list(diffstatdata(lines))
1566 stats = list(diffstatdata(lines))
1597
1567
1598 maxtotal, maxname = 0, 0
1568 maxtotal, maxname = 0, 0
1599 totaladds, totalremoves = 0, 0
1569 totaladds, totalremoves = 0, 0
1600 hasbinary = False
1570 hasbinary = False
1601
1571
1602 sized = [(filename, adds, removes, isbinary, encoding.colwidth(filename))
1572 sized = [(filename, adds, removes, isbinary, encoding.colwidth(filename))
1603 for filename, adds, removes, isbinary in stats]
1573 for filename, adds, removes, isbinary in stats]
1604
1574
1605 for filename, adds, removes, isbinary, namewidth in sized:
1575 for filename, adds, removes, isbinary, namewidth in sized:
1606 totaladds += adds
1576 totaladds += adds
1607 totalremoves += removes
1577 totalremoves += removes
1608 maxname = max(maxname, namewidth)
1578 maxname = max(maxname, namewidth)
1609 maxtotal = max(maxtotal, adds + removes)
1579 maxtotal = max(maxtotal, adds + removes)
1610 if isbinary:
1580 if isbinary:
1611 hasbinary = True
1581 hasbinary = True
1612
1582
1613 countwidth = len(str(maxtotal))
1583 countwidth = len(str(maxtotal))
1614 if hasbinary and countwidth < 3:
1584 if hasbinary and countwidth < 3:
1615 countwidth = 3
1585 countwidth = 3
1616 graphwidth = width - countwidth - maxname - 6
1586 graphwidth = width - countwidth - maxname - 6
1617 if graphwidth < 10:
1587 if graphwidth < 10:
1618 graphwidth = 10
1588 graphwidth = 10
1619
1589
1620 def scale(i):
1590 def scale(i):
1621 if maxtotal <= graphwidth:
1591 if maxtotal <= graphwidth:
1622 return i
1592 return i
1623 # If diffstat runs out of room it doesn't print anything,
1593 # If diffstat runs out of room it doesn't print anything,
1624 # which isn't very useful, so always print at least one + or -
1594 # which isn't very useful, so always print at least one + or -
1625 # if there were at least some changes.
1595 # if there were at least some changes.
1626 return max(i * graphwidth // maxtotal, int(bool(i)))
1596 return max(i * graphwidth // maxtotal, int(bool(i)))
1627
1597
1628 for filename, adds, removes, isbinary, namewidth in sized:
1598 for filename, adds, removes, isbinary, namewidth in sized:
1629 if git and isbinary:
1599 if git and isbinary:
1630 count = 'Bin'
1600 count = 'Bin'
1631 else:
1601 else:
1632 count = adds + removes
1602 count = adds + removes
1633 pluses = '+' * scale(adds)
1603 pluses = '+' * scale(adds)
1634 minuses = '-' * scale(removes)
1604 minuses = '-' * scale(removes)
1635 output.append(' %s%s | %*s %s%s\n' %
1605 output.append(' %s%s | %*s %s%s\n' %
1636 (filename, ' ' * (maxname - namewidth),
1606 (filename, ' ' * (maxname - namewidth),
1637 countwidth, count,
1607 countwidth, count,
1638 pluses, minuses))
1608 pluses, minuses))
1639
1609
1640 if stats:
1610 if stats:
1641 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1611 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1642 % (len(stats), totaladds, totalremoves))
1612 % (len(stats), totaladds, totalremoves))
1643
1613
1644 return ''.join(output)
1614 return ''.join(output)
1645
1615
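A sketch tying the statistics helpers above together: diffstatdata() and diffstat() expect individual lines, so the chunks produced by diff() are joined and re-split first (repository setup assumed as in the earlier sketches):

    import sys
    from mercurial import ui as uimod, hg
    from mercurial import patch as patchmod

    myui = uimod.ui()
    repo = hg.repository(myui, '/path/to/repo')    # placeholder path
    opts = patchmod.diffopts(myui, {'git': True})
    lines = ''.join(patchmod.diff(repo, opts=opts)).splitlines(True)

    for filename, adds, removes, isbinary in patchmod.diffstatdata(lines):
        myui.write('%s: +%d -%d%s\n'
                   % (filename, adds, removes,
                      isbinary and ' (binary)' or ''))
    sys.stdout.write(patchmod.diffstat(lines, width=72, git=True))
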
1646 def diffstatui(*args, **kw):
1616 def diffstatui(*args, **kw):
1647 '''like diffstat(), but yields 2-tuples of (output, label) for
1617 '''like diffstat(), but yields 2-tuples of (output, label) for
1648 ui.write()
1618 ui.write()
1649 '''
1619 '''
1650
1620
1651 for line in diffstat(*args, **kw).splitlines():
1621 for line in diffstat(*args, **kw).splitlines():
1652 if line and line[-1] in '+-':
1622 if line and line[-1] in '+-':
1653 name, graph = line.rsplit(' ', 1)
1623 name, graph = line.rsplit(' ', 1)
1654 yield (name + ' ', '')
1624 yield (name + ' ', '')
1655 m = re.search(r'\++', graph)
1625 m = re.search(r'\++', graph)
1656 if m:
1626 if m:
1657 yield (m.group(0), 'diffstat.inserted')
1627 yield (m.group(0), 'diffstat.inserted')
1658 m = re.search(r'-+', graph)
1628 m = re.search(r'-+', graph)
1659 if m:
1629 if m:
1660 yield (m.group(0), 'diffstat.deleted')
1630 yield (m.group(0), 'diffstat.deleted')
1661 else:
1631 else:
1662 yield (line, '')
1632 yield (line, '')
1663 yield ('\n', '')
1633 yield ('\n', '')