patch: merge makerejlines() into write_rej()
Patrick Mezard
r14349:776ae95b default
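For context, here is a minimal, self-contained sketch of what write_rej() looks like after this change, reconstructed from the new side of the diff below: the standalone makerejlines() generator is removed and its body is inlined, so the reject lines are collected into a plain list and handed to backend.writerej() in a single call. The _demobackend, _demohunk and _demofile names are hypothetical stand-ins (for illustration only) for Mercurial's fsbackend, hunk and patchfile objects, so the snippet runs on its own.

    import os

    class _demobackend(object):
        # hypothetical stand-in for fsbackend.writerej(), for illustration only
        def writerej(self, fname, failed, total, lines):
            print("%d out of %d hunks FAILED -- saving rejects to file %s.rej"
                  % (failed, total, fname))
            print(''.join(lines))

    class _demohunk(object):
        # hypothetical stand-in for a rejected hunk; only .hunk is used here
        def __init__(self, lines):
            self.hunk = lines

    class _demofile(object):
        def __init__(self, fname, rejects, hunks):
            self.fname = fname
            self.rej = rejects    # rejected hunks
            self.hunks = hunks    # total hunks seen for this file
            self.backend = _demobackend()

        def write_rej(self):
            # build the reject lines inline (the old makerejlines() generator
            # is gone) and pass them to the backend in one call
            if not self.rej:
                return
            base = os.path.basename(self.fname)
            lines = ["--- %s\n+++ %s\n" % (base, base)]
            for x in self.rej:
                for l in x.hunk:
                    lines.append(l)
                    if l[-1] != '\n':
                        lines.append("\n\ No newline at end of file\n")
            self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    if __name__ == '__main__':
        rej = [_demohunk(["@@ -1,1 +1,1 @@\n", "-old\n", "+new\n"])]
        _demofile("example.txt", rej, 3).write_rej()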
@@ -1,1743 +1,1740 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email.Parser, os, errno, re
9 import cStringIO, email.Parser, os, errno, re
10 import tempfile, zlib
10 import tempfile, zlib
11
11
12 from i18n import _
12 from i18n import _
13 from node import hex, nullid, short
13 from node import hex, nullid, short
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding
15
15
16 gitre = re.compile('diff --git a/(.*) b/(.*)')
16 gitre = re.compile('diff --git a/(.*) b/(.*)')
17
17
18 class PatchError(Exception):
18 class PatchError(Exception):
19 pass
19 pass
20
20
21 # helper functions
21 # helper functions
22
22
23 def copyfile(src, dst, basedir):
23 def copyfile(src, dst, basedir):
24 abssrc, absdst = [scmutil.canonpath(basedir, basedir, x)
24 abssrc, absdst = [scmutil.canonpath(basedir, basedir, x)
25 for x in [src, dst]]
25 for x in [src, dst]]
26 if os.path.lexists(absdst):
26 if os.path.lexists(absdst):
27 raise util.Abort(_("cannot create %s: destination already exists") %
27 raise util.Abort(_("cannot create %s: destination already exists") %
28 dst)
28 dst)
29
29
30 dstdir = os.path.dirname(absdst)
30 dstdir = os.path.dirname(absdst)
31 if dstdir and not os.path.isdir(dstdir):
31 if dstdir and not os.path.isdir(dstdir):
32 try:
32 try:
33 os.makedirs(dstdir)
33 os.makedirs(dstdir)
34 except IOError:
34 except IOError:
35 raise util.Abort(
35 raise util.Abort(
36 _("cannot create %s: unable to create destination directory")
36 _("cannot create %s: unable to create destination directory")
37 % dst)
37 % dst)
38
38
39 util.copyfile(abssrc, absdst)
39 util.copyfile(abssrc, absdst)
40
40
41 # public functions
41 # public functions
42
42
43 def split(stream):
43 def split(stream):
44 '''return an iterator of individual patches from a stream'''
44 '''return an iterator of individual patches from a stream'''
45 def isheader(line, inheader):
45 def isheader(line, inheader):
46 if inheader and line[0] in (' ', '\t'):
46 if inheader and line[0] in (' ', '\t'):
47 # continuation
47 # continuation
48 return True
48 return True
49 if line[0] in (' ', '-', '+'):
49 if line[0] in (' ', '-', '+'):
50 # diff line - don't check for header pattern in there
50 # diff line - don't check for header pattern in there
51 return False
51 return False
52 l = line.split(': ', 1)
52 l = line.split(': ', 1)
53 return len(l) == 2 and ' ' not in l[0]
53 return len(l) == 2 and ' ' not in l[0]
54
54
55 def chunk(lines):
55 def chunk(lines):
56 return cStringIO.StringIO(''.join(lines))
56 return cStringIO.StringIO(''.join(lines))
57
57
58 def hgsplit(stream, cur):
58 def hgsplit(stream, cur):
59 inheader = True
59 inheader = True
60
60
61 for line in stream:
61 for line in stream:
62 if not line.strip():
62 if not line.strip():
63 inheader = False
63 inheader = False
64 if not inheader and line.startswith('# HG changeset patch'):
64 if not inheader and line.startswith('# HG changeset patch'):
65 yield chunk(cur)
65 yield chunk(cur)
66 cur = []
66 cur = []
67 inheader = True
67 inheader = True
68
68
69 cur.append(line)
69 cur.append(line)
70
70
71 if cur:
71 if cur:
72 yield chunk(cur)
72 yield chunk(cur)
73
73
74 def mboxsplit(stream, cur):
74 def mboxsplit(stream, cur):
75 for line in stream:
75 for line in stream:
76 if line.startswith('From '):
76 if line.startswith('From '):
77 for c in split(chunk(cur[1:])):
77 for c in split(chunk(cur[1:])):
78 yield c
78 yield c
79 cur = []
79 cur = []
80
80
81 cur.append(line)
81 cur.append(line)
82
82
83 if cur:
83 if cur:
84 for c in split(chunk(cur[1:])):
84 for c in split(chunk(cur[1:])):
85 yield c
85 yield c
86
86
87 def mimesplit(stream, cur):
87 def mimesplit(stream, cur):
88 def msgfp(m):
88 def msgfp(m):
89 fp = cStringIO.StringIO()
89 fp = cStringIO.StringIO()
90 g = email.Generator.Generator(fp, mangle_from_=False)
90 g = email.Generator.Generator(fp, mangle_from_=False)
91 g.flatten(m)
91 g.flatten(m)
92 fp.seek(0)
92 fp.seek(0)
93 return fp
93 return fp
94
94
95 for line in stream:
95 for line in stream:
96 cur.append(line)
96 cur.append(line)
97 c = chunk(cur)
97 c = chunk(cur)
98
98
99 m = email.Parser.Parser().parse(c)
99 m = email.Parser.Parser().parse(c)
100 if not m.is_multipart():
100 if not m.is_multipart():
101 yield msgfp(m)
101 yield msgfp(m)
102 else:
102 else:
103 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
103 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
104 for part in m.walk():
104 for part in m.walk():
105 ct = part.get_content_type()
105 ct = part.get_content_type()
106 if ct not in ok_types:
106 if ct not in ok_types:
107 continue
107 continue
108 yield msgfp(part)
108 yield msgfp(part)
109
109
110 def headersplit(stream, cur):
110 def headersplit(stream, cur):
111 inheader = False
111 inheader = False
112
112
113 for line in stream:
113 for line in stream:
114 if not inheader and isheader(line, inheader):
114 if not inheader and isheader(line, inheader):
115 yield chunk(cur)
115 yield chunk(cur)
116 cur = []
116 cur = []
117 inheader = True
117 inheader = True
118 if inheader and not isheader(line, inheader):
118 if inheader and not isheader(line, inheader):
119 inheader = False
119 inheader = False
120
120
121 cur.append(line)
121 cur.append(line)
122
122
123 if cur:
123 if cur:
124 yield chunk(cur)
124 yield chunk(cur)
125
125
126 def remainder(cur):
126 def remainder(cur):
127 yield chunk(cur)
127 yield chunk(cur)
128
128
129 class fiter(object):
129 class fiter(object):
130 def __init__(self, fp):
130 def __init__(self, fp):
131 self.fp = fp
131 self.fp = fp
132
132
133 def __iter__(self):
133 def __iter__(self):
134 return self
134 return self
135
135
136 def next(self):
136 def next(self):
137 l = self.fp.readline()
137 l = self.fp.readline()
138 if not l:
138 if not l:
139 raise StopIteration
139 raise StopIteration
140 return l
140 return l
141
141
142 inheader = False
142 inheader = False
143 cur = []
143 cur = []
144
144
145 mimeheaders = ['content-type']
145 mimeheaders = ['content-type']
146
146
147 if not hasattr(stream, 'next'):
147 if not hasattr(stream, 'next'):
148 # http responses, for example, have readline but not next
148 # http responses, for example, have readline but not next
149 stream = fiter(stream)
149 stream = fiter(stream)
150
150
151 for line in stream:
151 for line in stream:
152 cur.append(line)
152 cur.append(line)
153 if line.startswith('# HG changeset patch'):
153 if line.startswith('# HG changeset patch'):
154 return hgsplit(stream, cur)
154 return hgsplit(stream, cur)
155 elif line.startswith('From '):
155 elif line.startswith('From '):
156 return mboxsplit(stream, cur)
156 return mboxsplit(stream, cur)
157 elif isheader(line, inheader):
157 elif isheader(line, inheader):
158 inheader = True
158 inheader = True
159 if line.split(':', 1)[0].lower() in mimeheaders:
159 if line.split(':', 1)[0].lower() in mimeheaders:
160 # let email parser handle this
160 # let email parser handle this
161 return mimesplit(stream, cur)
161 return mimesplit(stream, cur)
162 elif line.startswith('--- ') and inheader:
162 elif line.startswith('--- ') and inheader:
163 # No evil headers seen by diff start, split by hand
163 # No evil headers seen by diff start, split by hand
164 return headersplit(stream, cur)
164 return headersplit(stream, cur)
165 # Not enough info, keep reading
165 # Not enough info, keep reading
166
166
167 # if we are here, we have a very plain patch
167 # if we are here, we have a very plain patch
168 return remainder(cur)
168 return remainder(cur)
169
169
170 def extract(ui, fileobj):
170 def extract(ui, fileobj):
171 '''extract patch from data read from fileobj.
171 '''extract patch from data read from fileobj.
172
172
173 patch can be a normal patch or contained in an email message.
173 patch can be a normal patch or contained in an email message.
174
174
175 return tuple (filename, message, user, date, branch, node, p1, p2).
175 return tuple (filename, message, user, date, branch, node, p1, p2).
176 Any item in the returned tuple can be None. If filename is None,
176 Any item in the returned tuple can be None. If filename is None,
177 fileobj did not contain a patch. Caller must unlink filename when done.'''
177 fileobj did not contain a patch. Caller must unlink filename when done.'''
178
178
179 # attempt to detect the start of a patch
179 # attempt to detect the start of a patch
180 # (this heuristic is borrowed from quilt)
180 # (this heuristic is borrowed from quilt)
181 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
181 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
182 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
182 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
183 r'---[ \t].*?^\+\+\+[ \t]|'
183 r'---[ \t].*?^\+\+\+[ \t]|'
184 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
184 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
185
185
186 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
186 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
187 tmpfp = os.fdopen(fd, 'w')
187 tmpfp = os.fdopen(fd, 'w')
188 try:
188 try:
189 msg = email.Parser.Parser().parse(fileobj)
189 msg = email.Parser.Parser().parse(fileobj)
190
190
191 subject = msg['Subject']
191 subject = msg['Subject']
192 user = msg['From']
192 user = msg['From']
193 if not subject and not user:
193 if not subject and not user:
194 # Not an email, restore parsed headers if any
194 # Not an email, restore parsed headers if any
195 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
195 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
196
196
197 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
197 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
198 # should try to parse msg['Date']
198 # should try to parse msg['Date']
199 date = None
199 date = None
200 nodeid = None
200 nodeid = None
201 branch = None
201 branch = None
202 parents = []
202 parents = []
203
203
204 if subject:
204 if subject:
205 if subject.startswith('[PATCH'):
205 if subject.startswith('[PATCH'):
206 pend = subject.find(']')
206 pend = subject.find(']')
207 if pend >= 0:
207 if pend >= 0:
208 subject = subject[pend + 1:].lstrip()
208 subject = subject[pend + 1:].lstrip()
209 subject = subject.replace('\n\t', ' ')
209 subject = subject.replace('\n\t', ' ')
210 ui.debug('Subject: %s\n' % subject)
210 ui.debug('Subject: %s\n' % subject)
211 if user:
211 if user:
212 ui.debug('From: %s\n' % user)
212 ui.debug('From: %s\n' % user)
213 diffs_seen = 0
213 diffs_seen = 0
214 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
214 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
215 message = ''
215 message = ''
216 for part in msg.walk():
216 for part in msg.walk():
217 content_type = part.get_content_type()
217 content_type = part.get_content_type()
218 ui.debug('Content-Type: %s\n' % content_type)
218 ui.debug('Content-Type: %s\n' % content_type)
219 if content_type not in ok_types:
219 if content_type not in ok_types:
220 continue
220 continue
221 payload = part.get_payload(decode=True)
221 payload = part.get_payload(decode=True)
222 m = diffre.search(payload)
222 m = diffre.search(payload)
223 if m:
223 if m:
224 hgpatch = False
224 hgpatch = False
225 hgpatchheader = False
225 hgpatchheader = False
226 ignoretext = False
226 ignoretext = False
227
227
228 ui.debug('found patch at byte %d\n' % m.start(0))
228 ui.debug('found patch at byte %d\n' % m.start(0))
229 diffs_seen += 1
229 diffs_seen += 1
230 cfp = cStringIO.StringIO()
230 cfp = cStringIO.StringIO()
231 for line in payload[:m.start(0)].splitlines():
231 for line in payload[:m.start(0)].splitlines():
232 if line.startswith('# HG changeset patch') and not hgpatch:
232 if line.startswith('# HG changeset patch') and not hgpatch:
233 ui.debug('patch generated by hg export\n')
233 ui.debug('patch generated by hg export\n')
234 hgpatch = True
234 hgpatch = True
235 hgpatchheader = True
235 hgpatchheader = True
236 # drop earlier commit message content
236 # drop earlier commit message content
237 cfp.seek(0)
237 cfp.seek(0)
238 cfp.truncate()
238 cfp.truncate()
239 subject = None
239 subject = None
240 elif hgpatchheader:
240 elif hgpatchheader:
241 if line.startswith('# User '):
241 if line.startswith('# User '):
242 user = line[7:]
242 user = line[7:]
243 ui.debug('From: %s\n' % user)
243 ui.debug('From: %s\n' % user)
244 elif line.startswith("# Date "):
244 elif line.startswith("# Date "):
245 date = line[7:]
245 date = line[7:]
246 elif line.startswith("# Branch "):
246 elif line.startswith("# Branch "):
247 branch = line[9:]
247 branch = line[9:]
248 elif line.startswith("# Node ID "):
248 elif line.startswith("# Node ID "):
249 nodeid = line[10:]
249 nodeid = line[10:]
250 elif line.startswith("# Parent "):
250 elif line.startswith("# Parent "):
251 parents.append(line[10:])
251 parents.append(line[10:])
252 elif not line.startswith("# "):
252 elif not line.startswith("# "):
253 hgpatchheader = False
253 hgpatchheader = False
254 elif line == '---' and gitsendmail:
254 elif line == '---' and gitsendmail:
255 ignoretext = True
255 ignoretext = True
256 if not hgpatchheader and not ignoretext:
256 if not hgpatchheader and not ignoretext:
257 cfp.write(line)
257 cfp.write(line)
258 cfp.write('\n')
258 cfp.write('\n')
259 message = cfp.getvalue()
259 message = cfp.getvalue()
260 if tmpfp:
260 if tmpfp:
261 tmpfp.write(payload)
261 tmpfp.write(payload)
262 if not payload.endswith('\n'):
262 if not payload.endswith('\n'):
263 tmpfp.write('\n')
263 tmpfp.write('\n')
264 elif not diffs_seen and message and content_type == 'text/plain':
264 elif not diffs_seen and message and content_type == 'text/plain':
265 message += '\n' + payload
265 message += '\n' + payload
266 except:
266 except:
267 tmpfp.close()
267 tmpfp.close()
268 os.unlink(tmpname)
268 os.unlink(tmpname)
269 raise
269 raise
270
270
271 if subject and not message.startswith(subject):
271 if subject and not message.startswith(subject):
272 message = '%s\n%s' % (subject, message)
272 message = '%s\n%s' % (subject, message)
273 tmpfp.close()
273 tmpfp.close()
274 if not diffs_seen:
274 if not diffs_seen:
275 os.unlink(tmpname)
275 os.unlink(tmpname)
276 return None, message, user, date, branch, None, None, None
276 return None, message, user, date, branch, None, None, None
277 p1 = parents and parents.pop(0) or None
277 p1 = parents and parents.pop(0) or None
278 p2 = parents and parents.pop(0) or None
278 p2 = parents and parents.pop(0) or None
279 return tmpname, message, user, date, branch, nodeid, p1, p2
279 return tmpname, message, user, date, branch, nodeid, p1, p2
280
280
281 class patchmeta(object):
281 class patchmeta(object):
282 """Patched file metadata
282 """Patched file metadata
283
283
284 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
284 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
285 or COPY. 'path' is patched file path. 'oldpath' is set to the
285 or COPY. 'path' is patched file path. 'oldpath' is set to the
286 origin file when 'op' is either COPY or RENAME, None otherwise. If
286 origin file when 'op' is either COPY or RENAME, None otherwise. If
287 file mode is changed, 'mode' is a tuple (islink, isexec) where
287 file mode is changed, 'mode' is a tuple (islink, isexec) where
288 'islink' is True if the file is a symlink and 'isexec' is True if
288 'islink' is True if the file is a symlink and 'isexec' is True if
289 the file is executable. Otherwise, 'mode' is None.
289 the file is executable. Otherwise, 'mode' is None.
290 """
290 """
291 def __init__(self, path):
291 def __init__(self, path):
292 self.path = path
292 self.path = path
293 self.oldpath = None
293 self.oldpath = None
294 self.mode = None
294 self.mode = None
295 self.op = 'MODIFY'
295 self.op = 'MODIFY'
296 self.binary = False
296 self.binary = False
297
297
298 def setmode(self, mode):
298 def setmode(self, mode):
299 islink = mode & 020000
299 islink = mode & 020000
300 isexec = mode & 0100
300 isexec = mode & 0100
301 self.mode = (islink, isexec)
301 self.mode = (islink, isexec)
302
302
303 def __repr__(self):
303 def __repr__(self):
304 return "<patchmeta %s %r>" % (self.op, self.path)
304 return "<patchmeta %s %r>" % (self.op, self.path)
305
305
306 def readgitpatch(lr):
306 def readgitpatch(lr):
307 """extract git-style metadata about patches from <patchname>"""
307 """extract git-style metadata about patches from <patchname>"""
308
308
309 # Filter patch for git information
309 # Filter patch for git information
310 gp = None
310 gp = None
311 gitpatches = []
311 gitpatches = []
312 for line in lr:
312 for line in lr:
313 line = line.rstrip(' \r\n')
313 line = line.rstrip(' \r\n')
314 if line.startswith('diff --git'):
314 if line.startswith('diff --git'):
315 m = gitre.match(line)
315 m = gitre.match(line)
316 if m:
316 if m:
317 if gp:
317 if gp:
318 gitpatches.append(gp)
318 gitpatches.append(gp)
319 dst = m.group(2)
319 dst = m.group(2)
320 gp = patchmeta(dst)
320 gp = patchmeta(dst)
321 elif gp:
321 elif gp:
322 if line.startswith('--- '):
322 if line.startswith('--- '):
323 gitpatches.append(gp)
323 gitpatches.append(gp)
324 gp = None
324 gp = None
325 continue
325 continue
326 if line.startswith('rename from '):
326 if line.startswith('rename from '):
327 gp.op = 'RENAME'
327 gp.op = 'RENAME'
328 gp.oldpath = line[12:]
328 gp.oldpath = line[12:]
329 elif line.startswith('rename to '):
329 elif line.startswith('rename to '):
330 gp.path = line[10:]
330 gp.path = line[10:]
331 elif line.startswith('copy from '):
331 elif line.startswith('copy from '):
332 gp.op = 'COPY'
332 gp.op = 'COPY'
333 gp.oldpath = line[10:]
333 gp.oldpath = line[10:]
334 elif line.startswith('copy to '):
334 elif line.startswith('copy to '):
335 gp.path = line[8:]
335 gp.path = line[8:]
336 elif line.startswith('deleted file'):
336 elif line.startswith('deleted file'):
337 gp.op = 'DELETE'
337 gp.op = 'DELETE'
338 elif line.startswith('new file mode '):
338 elif line.startswith('new file mode '):
339 gp.op = 'ADD'
339 gp.op = 'ADD'
340 gp.setmode(int(line[-6:], 8))
340 gp.setmode(int(line[-6:], 8))
341 elif line.startswith('new mode '):
341 elif line.startswith('new mode '):
342 gp.setmode(int(line[-6:], 8))
342 gp.setmode(int(line[-6:], 8))
343 elif line.startswith('GIT binary patch'):
343 elif line.startswith('GIT binary patch'):
344 gp.binary = True
344 gp.binary = True
345 if gp:
345 if gp:
346 gitpatches.append(gp)
346 gitpatches.append(gp)
347
347
348 return gitpatches
348 return gitpatches
349
349
350 class linereader(object):
350 class linereader(object):
351 # simple class to allow pushing lines back into the input stream
351 # simple class to allow pushing lines back into the input stream
352 def __init__(self, fp, textmode=False):
352 def __init__(self, fp, textmode=False):
353 self.fp = fp
353 self.fp = fp
354 self.buf = []
354 self.buf = []
355 self.textmode = textmode
355 self.textmode = textmode
356 self.eol = None
356 self.eol = None
357
357
358 def push(self, line):
358 def push(self, line):
359 if line is not None:
359 if line is not None:
360 self.buf.append(line)
360 self.buf.append(line)
361
361
362 def readline(self):
362 def readline(self):
363 if self.buf:
363 if self.buf:
364 l = self.buf[0]
364 l = self.buf[0]
365 del self.buf[0]
365 del self.buf[0]
366 return l
366 return l
367 l = self.fp.readline()
367 l = self.fp.readline()
368 if not self.eol:
368 if not self.eol:
369 if l.endswith('\r\n'):
369 if l.endswith('\r\n'):
370 self.eol = '\r\n'
370 self.eol = '\r\n'
371 elif l.endswith('\n'):
371 elif l.endswith('\n'):
372 self.eol = '\n'
372 self.eol = '\n'
373 if self.textmode and l.endswith('\r\n'):
373 if self.textmode and l.endswith('\r\n'):
374 l = l[:-2] + '\n'
374 l = l[:-2] + '\n'
375 return l
375 return l
376
376
377 def __iter__(self):
377 def __iter__(self):
378 while 1:
378 while 1:
379 l = self.readline()
379 l = self.readline()
380 if not l:
380 if not l:
381 break
381 break
382 yield l
382 yield l
383
383
384 class abstractbackend(object):
384 class abstractbackend(object):
385 def __init__(self, ui):
385 def __init__(self, ui):
386 self.ui = ui
386 self.ui = ui
387
387
388 def readlines(self, fname):
388 def readlines(self, fname):
389 """Return target file lines, or its content as a single line
389 """Return target file lines, or its content as a single line
390 for symlinks.
390 for symlinks.
391 """
391 """
392 raise NotImplementedError
392 raise NotImplementedError
393
393
394 def writelines(self, fname, lines):
394 def writelines(self, fname, lines):
395 """Write lines to target file."""
395 """Write lines to target file."""
396 raise NotImplementedError
396 raise NotImplementedError
397
397
398 def unlink(self, fname):
398 def unlink(self, fname):
399 """Unlink target file."""
399 """Unlink target file."""
400 raise NotImplementedError
400 raise NotImplementedError
401
401
402 def writerej(self, fname, failed, total, lines):
402 def writerej(self, fname, failed, total, lines):
403 """Write rejected lines for fname. total is the number of hunks
403 """Write rejected lines for fname. total is the number of hunks
404 which failed to apply and total the total number of hunks for this
404 which failed to apply and total the total number of hunks for this
405 files.
405 files.
406 """
406 """
407 pass
407 pass
408
408
409 class fsbackend(abstractbackend):
409 class fsbackend(abstractbackend):
410 def __init__(self, ui, opener):
410 def __init__(self, ui, opener):
411 super(fsbackend, self).__init__(ui)
411 super(fsbackend, self).__init__(ui)
412 self.opener = opener
412 self.opener = opener
413
413
414 def readlines(self, fname):
414 def readlines(self, fname):
415 if os.path.islink(fname):
415 if os.path.islink(fname):
416 return [os.readlink(fname)]
416 return [os.readlink(fname)]
417 fp = self.opener(fname, 'r')
417 fp = self.opener(fname, 'r')
418 try:
418 try:
419 return list(fp)
419 return list(fp)
420 finally:
420 finally:
421 fp.close()
421 fp.close()
422
422
423 def writelines(self, fname, lines):
423 def writelines(self, fname, lines):
424 # Ensure supplied data ends in fname, being a regular file or
424 # Ensure supplied data ends in fname, being a regular file or
425 # a symlink. _updatedir will -too magically- take care
425 # a symlink. _updatedir will -too magically- take care
426 # of setting it to the proper type afterwards.
426 # of setting it to the proper type afterwards.
427 st_mode = None
427 st_mode = None
428 islink = os.path.islink(fname)
428 islink = os.path.islink(fname)
429 if islink:
429 if islink:
430 fp = cStringIO.StringIO()
430 fp = cStringIO.StringIO()
431 else:
431 else:
432 try:
432 try:
433 st_mode = os.lstat(fname).st_mode & 0777
433 st_mode = os.lstat(fname).st_mode & 0777
434 except OSError, e:
434 except OSError, e:
435 if e.errno != errno.ENOENT:
435 if e.errno != errno.ENOENT:
436 raise
436 raise
437 fp = self.opener(fname, 'w')
437 fp = self.opener(fname, 'w')
438 try:
438 try:
439 fp.writelines(lines)
439 fp.writelines(lines)
440 if islink:
440 if islink:
441 self.opener.symlink(fp.getvalue(), fname)
441 self.opener.symlink(fp.getvalue(), fname)
442 if st_mode is not None:
442 if st_mode is not None:
443 os.chmod(fname, st_mode)
443 os.chmod(fname, st_mode)
444 finally:
444 finally:
445 fp.close()
445 fp.close()
446
446
447 def unlink(self, fname):
447 def unlink(self, fname):
448 os.unlink(fname)
448 os.unlink(fname)
449
449
450 def writerej(self, fname, failed, total, lines):
450 def writerej(self, fname, failed, total, lines):
451 fname = fname + ".rej"
451 fname = fname + ".rej"
452 self.ui.warn(
452 self.ui.warn(
453 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
453 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
454 (failed, total, fname))
454 (failed, total, fname))
455 fp = self.opener(fname, 'w')
455 fp = self.opener(fname, 'w')
456 fp.writelines(lines)
456 fp.writelines(lines)
457 fp.close()
457 fp.close()
458
458
459 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
459 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
460 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
460 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
461 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
461 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
462 eolmodes = ['strict', 'crlf', 'lf', 'auto']
462 eolmodes = ['strict', 'crlf', 'lf', 'auto']
463
463
464 class patchfile(object):
464 class patchfile(object):
465 def __init__(self, ui, fname, backend, missing=False, eolmode='strict'):
465 def __init__(self, ui, fname, backend, missing=False, eolmode='strict'):
466 self.fname = fname
466 self.fname = fname
467 self.eolmode = eolmode
467 self.eolmode = eolmode
468 self.eol = None
468 self.eol = None
469 self.backend = backend
469 self.backend = backend
470 self.ui = ui
470 self.ui = ui
471 self.lines = []
471 self.lines = []
472 self.exists = False
472 self.exists = False
473 self.missing = missing
473 self.missing = missing
474 if not missing:
474 if not missing:
475 try:
475 try:
476 self.lines = self.backend.readlines(fname)
476 self.lines = self.backend.readlines(fname)
477 if self.lines:
477 if self.lines:
478 # Normalize line endings
478 # Normalize line endings
479 if self.lines[0].endswith('\r\n'):
479 if self.lines[0].endswith('\r\n'):
480 self.eol = '\r\n'
480 self.eol = '\r\n'
481 elif self.lines[0].endswith('\n'):
481 elif self.lines[0].endswith('\n'):
482 self.eol = '\n'
482 self.eol = '\n'
483 if eolmode != 'strict':
483 if eolmode != 'strict':
484 nlines = []
484 nlines = []
485 for l in self.lines:
485 for l in self.lines:
486 if l.endswith('\r\n'):
486 if l.endswith('\r\n'):
487 l = l[:-2] + '\n'
487 l = l[:-2] + '\n'
488 nlines.append(l)
488 nlines.append(l)
489 self.lines = nlines
489 self.lines = nlines
490 self.exists = True
490 self.exists = True
491 except IOError:
491 except IOError:
492 pass
492 pass
493 else:
493 else:
494 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
494 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
495
495
496 self.hash = {}
496 self.hash = {}
497 self.dirty = 0
497 self.dirty = 0
498 self.offset = 0
498 self.offset = 0
499 self.skew = 0
499 self.skew = 0
500 self.rej = []
500 self.rej = []
501 self.fileprinted = False
501 self.fileprinted = False
502 self.printfile(False)
502 self.printfile(False)
503 self.hunks = 0
503 self.hunks = 0
504
504
505 def writelines(self, fname, lines):
505 def writelines(self, fname, lines):
506 if self.eolmode == 'auto':
506 if self.eolmode == 'auto':
507 eol = self.eol
507 eol = self.eol
508 elif self.eolmode == 'crlf':
508 elif self.eolmode == 'crlf':
509 eol = '\r\n'
509 eol = '\r\n'
510 else:
510 else:
511 eol = '\n'
511 eol = '\n'
512
512
513 if self.eolmode != 'strict' and eol and eol != '\n':
513 if self.eolmode != 'strict' and eol and eol != '\n':
514 rawlines = []
514 rawlines = []
515 for l in lines:
515 for l in lines:
516 if l and l[-1] == '\n':
516 if l and l[-1] == '\n':
517 l = l[:-1] + eol
517 l = l[:-1] + eol
518 rawlines.append(l)
518 rawlines.append(l)
519 lines = rawlines
519 lines = rawlines
520
520
521 self.backend.writelines(fname, lines)
521 self.backend.writelines(fname, lines)
522
522
523 def printfile(self, warn):
523 def printfile(self, warn):
524 if self.fileprinted:
524 if self.fileprinted:
525 return
525 return
526 if warn or self.ui.verbose:
526 if warn or self.ui.verbose:
527 self.fileprinted = True
527 self.fileprinted = True
528 s = _("patching file %s\n") % self.fname
528 s = _("patching file %s\n") % self.fname
529 if warn:
529 if warn:
530 self.ui.warn(s)
530 self.ui.warn(s)
531 else:
531 else:
532 self.ui.note(s)
532 self.ui.note(s)
533
533
534
534
535 def findlines(self, l, linenum):
535 def findlines(self, l, linenum):
536 # looks through the hash and finds candidate lines. The
536 # looks through the hash and finds candidate lines. The
537 # result is a list of line numbers sorted based on distance
537 # result is a list of line numbers sorted based on distance
538 # from linenum
538 # from linenum
539
539
540 cand = self.hash.get(l, [])
540 cand = self.hash.get(l, [])
541 if len(cand) > 1:
541 if len(cand) > 1:
542 # resort our list of potentials forward then back.
542 # resort our list of potentials forward then back.
543 cand.sort(key=lambda x: abs(x - linenum))
543 cand.sort(key=lambda x: abs(x - linenum))
544 return cand
544 return cand
545
545
-    def makerejlines(self, fname):
-        base = os.path.basename(fname)
-        yield "--- %s\n+++ %s\n" % (base, base)
-        for x in self.rej:
-            for l in x.hunk:
-                yield l
-                if l[-1] != '\n':
-                    yield "\n\ No newline at end of file\n"
-
     def write_rej(self):
         # our rejects are a little different from patch(1). This always
         # creates rejects in the same form as the original patch. A file
         # header is inserted so that you can run the reject through patch again
         # without having to type the filename.
         if not self.rej:
             return
-        self.backend.writerej(self.fname, len(self.rej), self.hunks,
-                              self.makerejlines(self.fname))
+        base = os.path.basename(self.fname)
+        lines = ["--- %s\n+++ %s\n" % (base, base)]
+        for x in self.rej:
+            for l in x.hunk:
+                lines.append(l)
+                if l[-1] != '\n':
+                    lines.append("\n\ No newline at end of file\n")
+        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
 
565 def apply(self, h):
562 def apply(self, h):
566 if not h.complete():
563 if not h.complete():
567 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
564 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
568 (h.number, h.desc, len(h.a), h.lena, len(h.b),
565 (h.number, h.desc, len(h.a), h.lena, len(h.b),
569 h.lenb))
566 h.lenb))
570
567
571 self.hunks += 1
568 self.hunks += 1
572
569
573 if self.missing:
570 if self.missing:
574 self.rej.append(h)
571 self.rej.append(h)
575 return -1
572 return -1
576
573
577 if self.exists and h.createfile():
574 if self.exists and h.createfile():
578 self.ui.warn(_("file %s already exists\n") % self.fname)
575 self.ui.warn(_("file %s already exists\n") % self.fname)
579 self.rej.append(h)
576 self.rej.append(h)
580 return -1
577 return -1
581
578
582 if isinstance(h, binhunk):
579 if isinstance(h, binhunk):
583 if h.rmfile():
580 if h.rmfile():
584 self.backend.unlink(self.fname)
581 self.backend.unlink(self.fname)
585 else:
582 else:
586 self.lines[:] = h.new()
583 self.lines[:] = h.new()
587 self.offset += len(h.new())
584 self.offset += len(h.new())
588 self.dirty = True
585 self.dirty = True
589 return 0
586 return 0
590
587
591 horig = h
588 horig = h
592 if (self.eolmode in ('crlf', 'lf')
589 if (self.eolmode in ('crlf', 'lf')
593 or self.eolmode == 'auto' and self.eol):
590 or self.eolmode == 'auto' and self.eol):
594 # If new eols are going to be normalized, then normalize
591 # If new eols are going to be normalized, then normalize
595 # hunk data before patching. Otherwise, preserve input
592 # hunk data before patching. Otherwise, preserve input
596 # line-endings.
593 # line-endings.
597 h = h.getnormalized()
594 h = h.getnormalized()
598
595
599 # fast case first, no offsets, no fuzz
596 # fast case first, no offsets, no fuzz
600 old = h.old()
597 old = h.old()
601 # patch starts counting at 1 unless we are adding the file
598 # patch starts counting at 1 unless we are adding the file
602 if h.starta == 0:
599 if h.starta == 0:
603 start = 0
600 start = 0
604 else:
601 else:
605 start = h.starta + self.offset - 1
602 start = h.starta + self.offset - 1
606 orig_start = start
603 orig_start = start
607 # if there's skew we want to emit the "(offset %d lines)" even
604 # if there's skew we want to emit the "(offset %d lines)" even
608 # when the hunk cleanly applies at start + skew, so skip the
605 # when the hunk cleanly applies at start + skew, so skip the
609 # fast case code
606 # fast case code
610 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
607 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
611 if h.rmfile():
608 if h.rmfile():
612 self.backend.unlink(self.fname)
609 self.backend.unlink(self.fname)
613 else:
610 else:
614 self.lines[start : start + h.lena] = h.new()
611 self.lines[start : start + h.lena] = h.new()
615 self.offset += h.lenb - h.lena
612 self.offset += h.lenb - h.lena
616 self.dirty = True
613 self.dirty = True
617 return 0
614 return 0
618
615
619 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
616 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
620 self.hash = {}
617 self.hash = {}
621 for x, s in enumerate(self.lines):
618 for x, s in enumerate(self.lines):
622 self.hash.setdefault(s, []).append(x)
619 self.hash.setdefault(s, []).append(x)
623 if h.hunk[-1][0] != ' ':
620 if h.hunk[-1][0] != ' ':
624 # if the hunk tried to put something at the bottom of the file
621 # if the hunk tried to put something at the bottom of the file
625 # override the start line and use eof here
622 # override the start line and use eof here
626 search_start = len(self.lines)
623 search_start = len(self.lines)
627 else:
624 else:
628 search_start = orig_start + self.skew
625 search_start = orig_start + self.skew
629
626
630 for fuzzlen in xrange(3):
627 for fuzzlen in xrange(3):
631 for toponly in [True, False]:
628 for toponly in [True, False]:
632 old = h.old(fuzzlen, toponly)
629 old = h.old(fuzzlen, toponly)
633
630
634 cand = self.findlines(old[0][1:], search_start)
631 cand = self.findlines(old[0][1:], search_start)
635 for l in cand:
632 for l in cand:
636 if diffhelpers.testhunk(old, self.lines, l) == 0:
633 if diffhelpers.testhunk(old, self.lines, l) == 0:
637 newlines = h.new(fuzzlen, toponly)
634 newlines = h.new(fuzzlen, toponly)
638 self.lines[l : l + len(old)] = newlines
635 self.lines[l : l + len(old)] = newlines
639 self.offset += len(newlines) - len(old)
636 self.offset += len(newlines) - len(old)
640 self.skew = l - orig_start
637 self.skew = l - orig_start
641 self.dirty = True
638 self.dirty = True
642 offset = l - orig_start - fuzzlen
639 offset = l - orig_start - fuzzlen
643 if fuzzlen:
640 if fuzzlen:
644 msg = _("Hunk #%d succeeded at %d "
641 msg = _("Hunk #%d succeeded at %d "
645 "with fuzz %d "
642 "with fuzz %d "
646 "(offset %d lines).\n")
643 "(offset %d lines).\n")
647 self.printfile(True)
644 self.printfile(True)
648 self.ui.warn(msg %
645 self.ui.warn(msg %
649 (h.number, l + 1, fuzzlen, offset))
646 (h.number, l + 1, fuzzlen, offset))
650 else:
647 else:
651 msg = _("Hunk #%d succeeded at %d "
648 msg = _("Hunk #%d succeeded at %d "
652 "(offset %d lines).\n")
649 "(offset %d lines).\n")
653 self.ui.note(msg % (h.number, l + 1, offset))
650 self.ui.note(msg % (h.number, l + 1, offset))
654 return fuzzlen
651 return fuzzlen
655 self.printfile(True)
652 self.printfile(True)
656 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
653 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
657 self.rej.append(horig)
654 self.rej.append(horig)
658 return -1
655 return -1
659
656
660 def close(self):
657 def close(self):
661 if self.dirty:
658 if self.dirty:
662 self.writelines(self.fname, self.lines)
659 self.writelines(self.fname, self.lines)
663 self.write_rej()
660 self.write_rej()
664 return len(self.rej)
661 return len(self.rej)
665
662
666 class hunk(object):
663 class hunk(object):
667 def __init__(self, desc, num, lr, context, create=False, remove=False):
664 def __init__(self, desc, num, lr, context, create=False, remove=False):
668 self.number = num
665 self.number = num
669 self.desc = desc
666 self.desc = desc
670 self.hunk = [desc]
667 self.hunk = [desc]
671 self.a = []
668 self.a = []
672 self.b = []
669 self.b = []
673 self.starta = self.lena = None
670 self.starta = self.lena = None
674 self.startb = self.lenb = None
671 self.startb = self.lenb = None
675 if lr is not None:
672 if lr is not None:
676 if context:
673 if context:
677 self.read_context_hunk(lr)
674 self.read_context_hunk(lr)
678 else:
675 else:
679 self.read_unified_hunk(lr)
676 self.read_unified_hunk(lr)
680 self.create = create
677 self.create = create
681 self.remove = remove and not create
678 self.remove = remove and not create
682
679
683 def getnormalized(self):
680 def getnormalized(self):
684 """Return a copy with line endings normalized to LF."""
681 """Return a copy with line endings normalized to LF."""
685
682
686 def normalize(lines):
683 def normalize(lines):
687 nlines = []
684 nlines = []
688 for line in lines:
685 for line in lines:
689 if line.endswith('\r\n'):
686 if line.endswith('\r\n'):
690 line = line[:-2] + '\n'
687 line = line[:-2] + '\n'
691 nlines.append(line)
688 nlines.append(line)
692 return nlines
689 return nlines
693
690
694 # Dummy object, it is rebuilt manually
691 # Dummy object, it is rebuilt manually
695 nh = hunk(self.desc, self.number, None, None, False, False)
692 nh = hunk(self.desc, self.number, None, None, False, False)
696 nh.number = self.number
693 nh.number = self.number
697 nh.desc = self.desc
694 nh.desc = self.desc
698 nh.hunk = self.hunk
695 nh.hunk = self.hunk
699 nh.a = normalize(self.a)
696 nh.a = normalize(self.a)
700 nh.b = normalize(self.b)
697 nh.b = normalize(self.b)
701 nh.starta = self.starta
698 nh.starta = self.starta
702 nh.startb = self.startb
699 nh.startb = self.startb
703 nh.lena = self.lena
700 nh.lena = self.lena
704 nh.lenb = self.lenb
701 nh.lenb = self.lenb
705 nh.create = self.create
702 nh.create = self.create
706 nh.remove = self.remove
703 nh.remove = self.remove
707 return nh
704 return nh
708
705
709 def read_unified_hunk(self, lr):
706 def read_unified_hunk(self, lr):
710 m = unidesc.match(self.desc)
707 m = unidesc.match(self.desc)
711 if not m:
708 if not m:
712 raise PatchError(_("bad hunk #%d") % self.number)
709 raise PatchError(_("bad hunk #%d") % self.number)
713 self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
710 self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
714 if self.lena is None:
711 if self.lena is None:
715 self.lena = 1
712 self.lena = 1
716 else:
713 else:
717 self.lena = int(self.lena)
714 self.lena = int(self.lena)
718 if self.lenb is None:
715 if self.lenb is None:
719 self.lenb = 1
716 self.lenb = 1
720 else:
717 else:
721 self.lenb = int(self.lenb)
718 self.lenb = int(self.lenb)
722 self.starta = int(self.starta)
719 self.starta = int(self.starta)
723 self.startb = int(self.startb)
720 self.startb = int(self.startb)
724 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
721 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
725 # if we hit eof before finishing out the hunk, the last line will
722 # if we hit eof before finishing out the hunk, the last line will
726 # be zero length. Lets try to fix it up.
723 # be zero length. Lets try to fix it up.
727 while len(self.hunk[-1]) == 0:
724 while len(self.hunk[-1]) == 0:
728 del self.hunk[-1]
725 del self.hunk[-1]
729 del self.a[-1]
726 del self.a[-1]
730 del self.b[-1]
727 del self.b[-1]
731 self.lena -= 1
728 self.lena -= 1
732 self.lenb -= 1
729 self.lenb -= 1
733 self._fixnewline(lr)
730 self._fixnewline(lr)
734
731
735 def read_context_hunk(self, lr):
732 def read_context_hunk(self, lr):
736 self.desc = lr.readline()
733 self.desc = lr.readline()
737 m = contextdesc.match(self.desc)
734 m = contextdesc.match(self.desc)
738 if not m:
735 if not m:
739 raise PatchError(_("bad hunk #%d") % self.number)
736 raise PatchError(_("bad hunk #%d") % self.number)
740 foo, self.starta, foo2, aend, foo3 = m.groups()
737 foo, self.starta, foo2, aend, foo3 = m.groups()
741 self.starta = int(self.starta)
738 self.starta = int(self.starta)
742 if aend is None:
739 if aend is None:
743 aend = self.starta
740 aend = self.starta
744 self.lena = int(aend) - self.starta
741 self.lena = int(aend) - self.starta
745 if self.starta:
742 if self.starta:
746 self.lena += 1
743 self.lena += 1
747 for x in xrange(self.lena):
744 for x in xrange(self.lena):
748 l = lr.readline()
745 l = lr.readline()
749 if l.startswith('---'):
746 if l.startswith('---'):
750 # lines addition, old block is empty
747 # lines addition, old block is empty
751 lr.push(l)
748 lr.push(l)
752 break
749 break
753 s = l[2:]
750 s = l[2:]
754 if l.startswith('- ') or l.startswith('! '):
751 if l.startswith('- ') or l.startswith('! '):
755 u = '-' + s
752 u = '-' + s
756 elif l.startswith(' '):
753 elif l.startswith(' '):
757 u = ' ' + s
754 u = ' ' + s
758 else:
755 else:
759 raise PatchError(_("bad hunk #%d old text line %d") %
756 raise PatchError(_("bad hunk #%d old text line %d") %
760 (self.number, x))
757 (self.number, x))
761 self.a.append(u)
758 self.a.append(u)
762 self.hunk.append(u)
759 self.hunk.append(u)
763
760
764 l = lr.readline()
761 l = lr.readline()
765 if l.startswith('\ '):
762 if l.startswith('\ '):
766 s = self.a[-1][:-1]
763 s = self.a[-1][:-1]
767 self.a[-1] = s
764 self.a[-1] = s
768 self.hunk[-1] = s
765 self.hunk[-1] = s
769 l = lr.readline()
766 l = lr.readline()
770 m = contextdesc.match(l)
767 m = contextdesc.match(l)
771 if not m:
768 if not m:
772 raise PatchError(_("bad hunk #%d") % self.number)
769 raise PatchError(_("bad hunk #%d") % self.number)
773 foo, self.startb, foo2, bend, foo3 = m.groups()
770 foo, self.startb, foo2, bend, foo3 = m.groups()
774 self.startb = int(self.startb)
771 self.startb = int(self.startb)
775 if bend is None:
772 if bend is None:
776 bend = self.startb
773 bend = self.startb
777 self.lenb = int(bend) - self.startb
774 self.lenb = int(bend) - self.startb
778 if self.startb:
775 if self.startb:
779 self.lenb += 1
776 self.lenb += 1
780 hunki = 1
777 hunki = 1
781 for x in xrange(self.lenb):
778 for x in xrange(self.lenb):
782 l = lr.readline()
779 l = lr.readline()
783 if l.startswith('\ '):
780 if l.startswith('\ '):
784 # XXX: the only way to hit this is with an invalid line range.
781 # XXX: the only way to hit this is with an invalid line range.
785 # The no-eol marker is not counted in the line range, but I
782 # The no-eol marker is not counted in the line range, but I
786 # guess there are diff(1) out there which behave differently.
783 # guess there are diff(1) out there which behave differently.
787 s = self.b[-1][:-1]
784 s = self.b[-1][:-1]
788 self.b[-1] = s
785 self.b[-1] = s
789 self.hunk[hunki - 1] = s
786 self.hunk[hunki - 1] = s
790 continue
787 continue
791 if not l:
788 if not l:
792 # line deletions, new block is empty and we hit EOF
789 # line deletions, new block is empty and we hit EOF
793 lr.push(l)
790 lr.push(l)
794 break
791 break
795 s = l[2:]
792 s = l[2:]
796 if l.startswith('+ ') or l.startswith('! '):
793 if l.startswith('+ ') or l.startswith('! '):
797 u = '+' + s
794 u = '+' + s
798 elif l.startswith(' '):
795 elif l.startswith(' '):
799 u = ' ' + s
796 u = ' ' + s
800 elif len(self.b) == 0:
797 elif len(self.b) == 0:
801 # line deletions, new block is empty
798 # line deletions, new block is empty
802 lr.push(l)
799 lr.push(l)
803 break
800 break
804 else:
801 else:
805 raise PatchError(_("bad hunk #%d old text line %d") %
802 raise PatchError(_("bad hunk #%d old text line %d") %
806 (self.number, x))
803 (self.number, x))
807 self.b.append(s)
804 self.b.append(s)
808 while True:
805 while True:
809 if hunki >= len(self.hunk):
806 if hunki >= len(self.hunk):
810 h = ""
807 h = ""
811 else:
808 else:
812 h = self.hunk[hunki]
809 h = self.hunk[hunki]
813 hunki += 1
810 hunki += 1
814 if h == u:
811 if h == u:
815 break
812 break
816 elif h.startswith('-'):
813 elif h.startswith('-'):
817 continue
814 continue
818 else:
815 else:
819 self.hunk.insert(hunki - 1, u)
816 self.hunk.insert(hunki - 1, u)
820 break
817 break
821
818
822 if not self.a:
819 if not self.a:
823 # this happens when lines were only added to the hunk
820 # this happens when lines were only added to the hunk
824 for x in self.hunk:
821 for x in self.hunk:
825 if x.startswith('-') or x.startswith(' '):
822 if x.startswith('-') or x.startswith(' '):
826 self.a.append(x)
823 self.a.append(x)
827 if not self.b:
824 if not self.b:
828 # this happens when lines were only deleted from the hunk
825 # this happens when lines were only deleted from the hunk
829 for x in self.hunk:
826 for x in self.hunk:
830 if x.startswith('+') or x.startswith(' '):
827 if x.startswith('+') or x.startswith(' '):
831 self.b.append(x[1:])
828 self.b.append(x[1:])
832 # @@ -start,len +start,len @@
829 # @@ -start,len +start,len @@
833 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
830 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
834 self.startb, self.lenb)
831 self.startb, self.lenb)
835 self.hunk[0] = self.desc
832 self.hunk[0] = self.desc
836 self._fixnewline(lr)
833 self._fixnewline(lr)
837
834
838 def _fixnewline(self, lr):
835 def _fixnewline(self, lr):
839 l = lr.readline()
836 l = lr.readline()
840 if l.startswith('\ '):
837 if l.startswith('\ '):
841 diffhelpers.fix_newline(self.hunk, self.a, self.b)
838 diffhelpers.fix_newline(self.hunk, self.a, self.b)
842 else:
839 else:
843 lr.push(l)
840 lr.push(l)
844
841
845 def complete(self):
842 def complete(self):
846 return len(self.a) == self.lena and len(self.b) == self.lenb
843 return len(self.a) == self.lena and len(self.b) == self.lenb
847
844
848 def createfile(self):
845 def createfile(self):
849 return self.starta == 0 and self.lena == 0 and self.create
846 return self.starta == 0 and self.lena == 0 and self.create
850
847
851 def rmfile(self):
848 def rmfile(self):
852 return self.startb == 0 and self.lenb == 0 and self.remove
849 return self.startb == 0 and self.lenb == 0 and self.remove
853
850
854 def fuzzit(self, l, fuzz, toponly):
851 def fuzzit(self, l, fuzz, toponly):
855 # this removes context lines from the top and bottom of list 'l'. It
852 # this removes context lines from the top and bottom of list 'l'. It
856 # checks the hunk to make sure only context lines are removed, and then
853 # checks the hunk to make sure only context lines are removed, and then
857 # returns a new shortened list of lines.
854 # returns a new shortened list of lines.
858 fuzz = min(fuzz, len(l)-1)
855 fuzz = min(fuzz, len(l)-1)
859 if fuzz:
856 if fuzz:
860 top = 0
857 top = 0
861 bot = 0
858 bot = 0
862 hlen = len(self.hunk)
859 hlen = len(self.hunk)
863 for x in xrange(hlen - 1):
860 for x in xrange(hlen - 1):
864 # the hunk starts with the @@ line, so use x+1
861 # the hunk starts with the @@ line, so use x+1
865 if self.hunk[x + 1][0] == ' ':
862 if self.hunk[x + 1][0] == ' ':
866 top += 1
863 top += 1
867 else:
864 else:
868 break
865 break
869 if not toponly:
866 if not toponly:
870 for x in xrange(hlen - 1):
867 for x in xrange(hlen - 1):
871 if self.hunk[hlen - bot - 1][0] == ' ':
868 if self.hunk[hlen - bot - 1][0] == ' ':
872 bot += 1
869 bot += 1
873 else:
870 else:
874 break
871 break
875
872
876 # top and bot now count context in the hunk
873 # top and bot now count context in the hunk
877 # adjust them if either one is short
874 # adjust them if either one is short
878 context = max(top, bot, 3)
875 context = max(top, bot, 3)
879 if bot < context:
876 if bot < context:
880 bot = max(0, fuzz - (context - bot))
877 bot = max(0, fuzz - (context - bot))
881 else:
878 else:
882 bot = min(fuzz, bot)
879 bot = min(fuzz, bot)
883 if top < context:
880 if top < context:
884 top = max(0, fuzz - (context - top))
881 top = max(0, fuzz - (context - top))
885 else:
882 else:
886 top = min(fuzz, top)
883 top = min(fuzz, top)
887
884
888 return l[top:len(l)-bot]
885 return l[top:len(l)-bot]
889 return l
886 return l
890
887
891 def old(self, fuzz=0, toponly=False):
888 def old(self, fuzz=0, toponly=False):
892 return self.fuzzit(self.a, fuzz, toponly)
889 return self.fuzzit(self.a, fuzz, toponly)
893
890
894 def new(self, fuzz=0, toponly=False):
891 def new(self, fuzz=0, toponly=False):
895 return self.fuzzit(self.b, fuzz, toponly)
892 return self.fuzzit(self.b, fuzz, toponly)
896
893
897 class binhunk:
894 class binhunk:
898 'A binary patch file. Only understands literals so far.'
895 'A binary patch file. Only understands literals so far.'
899 def __init__(self, gitpatch):
896 def __init__(self, gitpatch):
900 self.gitpatch = gitpatch
897 self.gitpatch = gitpatch
901 self.text = None
898 self.text = None
902 self.hunk = ['GIT binary patch\n']
899 self.hunk = ['GIT binary patch\n']
903
900
904 def createfile(self):
901 def createfile(self):
905 return self.gitpatch.op in ('ADD', 'RENAME', 'COPY')
902 return self.gitpatch.op in ('ADD', 'RENAME', 'COPY')
906
903
907 def rmfile(self):
904 def rmfile(self):
908 return self.gitpatch.op == 'DELETE'
905 return self.gitpatch.op == 'DELETE'
909
906
910 def complete(self):
907 def complete(self):
911 return self.text is not None
908 return self.text is not None
912
909
913 def new(self):
910 def new(self):
914 return [self.text]
911 return [self.text]
915
912
916 def extract(self, lr):
913 def extract(self, lr):
917 line = lr.readline()
914 line = lr.readline()
918 self.hunk.append(line)
915 self.hunk.append(line)
919 while line and not line.startswith('literal '):
916 while line and not line.startswith('literal '):
920 line = lr.readline()
917 line = lr.readline()
921 self.hunk.append(line)
918 self.hunk.append(line)
922 if not line:
919 if not line:
923 raise PatchError(_('could not extract binary patch'))
920 raise PatchError(_('could not extract binary patch'))
924 size = int(line[8:].rstrip())
921 size = int(line[8:].rstrip())
925 dec = []
922 dec = []
926 line = lr.readline()
923 line = lr.readline()
927 self.hunk.append(line)
924 self.hunk.append(line)
928 while len(line) > 1:
925 while len(line) > 1:
929 l = line[0]
926 l = line[0]
930 if l <= 'Z' and l >= 'A':
927 if l <= 'Z' and l >= 'A':
931 l = ord(l) - ord('A') + 1
928 l = ord(l) - ord('A') + 1
932 else:
929 else:
933 l = ord(l) - ord('a') + 27
930 l = ord(l) - ord('a') + 27
934 dec.append(base85.b85decode(line[1:-1])[:l])
931 dec.append(base85.b85decode(line[1:-1])[:l])
935 line = lr.readline()
932 line = lr.readline()
936 self.hunk.append(line)
933 self.hunk.append(line)
937 text = zlib.decompress(''.join(dec))
934 text = zlib.decompress(''.join(dec))
938 if len(text) != size:
935 if len(text) != size:
939 raise PatchError(_('binary patch is %d bytes, not %d') %
936 raise PatchError(_('binary patch is %d bytes, not %d') %
940 len(text), size)
937 len(text), size)
941 self.text = text
938 self.text = text
942
939
943 def parsefilename(str):
940 def parsefilename(str):
944 # --- filename \t|space stuff
941 # --- filename \t|space stuff
945 s = str[4:].rstrip('\r\n')
942 s = str[4:].rstrip('\r\n')
946 i = s.find('\t')
943 i = s.find('\t')
947 if i < 0:
944 if i < 0:
948 i = s.find(' ')
945 i = s.find(' ')
949 if i < 0:
946 if i < 0:
950 return s
947 return s
951 return s[:i]
948 return s[:i]
952
949
953 def pathstrip(path, strip):
950 def pathstrip(path, strip):
954 pathlen = len(path)
951 pathlen = len(path)
955 i = 0
952 i = 0
956 if strip == 0:
953 if strip == 0:
957 return '', path.rstrip()
954 return '', path.rstrip()
958 count = strip
955 count = strip
959 while count > 0:
956 while count > 0:
960 i = path.find('/', i)
957 i = path.find('/', i)
961 if i == -1:
958 if i == -1:
962 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
959 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
963 (count, strip, path))
960 (count, strip, path))
964 i += 1
961 i += 1
965 # consume '//' in the path
962 # consume '//' in the path
966 while i < pathlen - 1 and path[i] == '/':
963 while i < pathlen - 1 and path[i] == '/':
967 i += 1
964 i += 1
968 count -= 1
965 count -= 1
969 return path[:i].lstrip(), path[i:].rstrip()
966 return path[:i].lstrip(), path[i:].rstrip()
970
967
971 def selectfile(afile_orig, bfile_orig, hunk, strip):
968 def selectfile(afile_orig, bfile_orig, hunk, strip):
972 nulla = afile_orig == "/dev/null"
969 nulla = afile_orig == "/dev/null"
973 nullb = bfile_orig == "/dev/null"
970 nullb = bfile_orig == "/dev/null"
974 abase, afile = pathstrip(afile_orig, strip)
971 abase, afile = pathstrip(afile_orig, strip)
975 gooda = not nulla and os.path.lexists(afile)
972 gooda = not nulla and os.path.lexists(afile)
976 bbase, bfile = pathstrip(bfile_orig, strip)
973 bbase, bfile = pathstrip(bfile_orig, strip)
977 if afile == bfile:
974 if afile == bfile:
978 goodb = gooda
975 goodb = gooda
979 else:
976 else:
980 goodb = not nullb and os.path.lexists(bfile)
977 goodb = not nullb and os.path.lexists(bfile)
981 createfunc = hunk.createfile
978 createfunc = hunk.createfile
982 missing = not goodb and not gooda and not createfunc()
979 missing = not goodb and not gooda and not createfunc()
983
980
984 # some diff programs apparently produce patches where the afile is
981 # some diff programs apparently produce patches where the afile is
985 # not /dev/null, but afile starts with bfile
982 # not /dev/null, but afile starts with bfile
986 abasedir = afile[:afile.rfind('/') + 1]
983 abasedir = afile[:afile.rfind('/') + 1]
987 bbasedir = bfile[:bfile.rfind('/') + 1]
984 bbasedir = bfile[:bfile.rfind('/') + 1]
988 if missing and abasedir == bbasedir and afile.startswith(bfile):
985 if missing and abasedir == bbasedir and afile.startswith(bfile):
989 # this isn't very pretty
986 # this isn't very pretty
990 hunk.create = True
987 hunk.create = True
991 if createfunc():
988 if createfunc():
992 missing = False
989 missing = False
993 else:
990 else:
994 hunk.create = False
991 hunk.create = False
995
992
996 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
993 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
997 # diff is between a file and its backup. In this case, the original
994 # diff is between a file and its backup. In this case, the original
998 # file should be patched (see original mpatch code).
995 # file should be patched (see original mpatch code).
999 isbackup = (abase == bbase and bfile.startswith(afile))
996 isbackup = (abase == bbase and bfile.startswith(afile))
1000 fname = None
997 fname = None
1001 if not missing:
998 if not missing:
1002 if gooda and goodb:
999 if gooda and goodb:
1003 fname = isbackup and afile or bfile
1000 fname = isbackup and afile or bfile
1004 elif gooda:
1001 elif gooda:
1005 fname = afile
1002 fname = afile
1006
1003
1007 if not fname:
1004 if not fname:
1008 if not nullb:
1005 if not nullb:
1009 fname = isbackup and afile or bfile
1006 fname = isbackup and afile or bfile
1010 elif not nulla:
1007 elif not nulla:
1011 fname = afile
1008 fname = afile
1012 else:
1009 else:
1013 raise PatchError(_("undefined source and destination files"))
1010 raise PatchError(_("undefined source and destination files"))
1014
1011
1015 return fname, missing
1012 return fname, missing
1016
1013
1017 def scangitpatch(lr, firstline):
1014 def scangitpatch(lr, firstline):
1018 """
1015 """
1019 Git patches can emit:
1016 Git patches can emit:
1020 - rename a to b
1017 - rename a to b
1021 - change b
1018 - change b
1022 - copy a to c
1019 - copy a to c
1023 - change c
1020 - change c
1024
1021
1025 We cannot apply this sequence as-is, the renamed 'a' could not be
1022 We cannot apply this sequence as-is, the renamed 'a' could not be
1026 found for it would have been renamed already. And we cannot copy
1023 found for it would have been renamed already. And we cannot copy
1027 from 'b' instead because 'b' would have been changed already. So
1024 from 'b' instead because 'b' would have been changed already. So
1028 we scan the git patch for copy and rename commands so we can
1025 we scan the git patch for copy and rename commands so we can
1029 perform the copies ahead of time.
1026 perform the copies ahead of time.
1030 """
1027 """
1031 pos = 0
1028 pos = 0
1032 try:
1029 try:
1033 pos = lr.fp.tell()
1030 pos = lr.fp.tell()
1034 fp = lr.fp
1031 fp = lr.fp
1035 except IOError:
1032 except IOError:
1036 fp = cStringIO.StringIO(lr.fp.read())
1033 fp = cStringIO.StringIO(lr.fp.read())
1037 gitlr = linereader(fp, lr.textmode)
1034 gitlr = linereader(fp, lr.textmode)
1038 gitlr.push(firstline)
1035 gitlr.push(firstline)
1039 gitpatches = readgitpatch(gitlr)
1036 gitpatches = readgitpatch(gitlr)
1040 fp.seek(pos)
1037 fp.seek(pos)
1041 return gitpatches
1038 return gitpatches
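
A minimal driving sketch (illustrative only; 'gitdifftext' is an assumed in-memory git-style patch, and linereader is the reader class defined earlier in this module):

    import cStringIO

    fp = cStringIO.StringIO(gitdifftext)
    lr = linereader(fp)
    firstline = lr.readline()                  # the 'diff --git a/... b/...' line
    gitpatches = scangitpatch(lr, firstline)   # copy/rename records, collected up front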
1042
1039
1043 def iterhunks(fp):
1040 def iterhunks(fp):
1044 """Read a patch and yield the following events:
1041 """Read a patch and yield the following events:
1045 - ("file", afile, bfile, firsthunk): select a new target file.
1042 - ("file", afile, bfile, firsthunk): select a new target file.
1046 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1043 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1047 "file" event.
1044 "file" event.
1048 - ("git", gitchanges): current diff is in git format, gitchanges
1045 - ("git", gitchanges): current diff is in git format, gitchanges
1049 maps filenames to gitpatch records. Unique event.
1046 maps filenames to gitpatch records. Unique event.
1050 """
1047 """
1051 changed = {}
1048 changed = {}
1052 afile = ""
1049 afile = ""
1053 bfile = ""
1050 bfile = ""
1054 state = None
1051 state = None
1055 hunknum = 0
1052 hunknum = 0
1056 emitfile = newfile = False
1053 emitfile = newfile = False
1057 git = False
1054 git = False
1058
1055
1059 # our states
1056 # our states
1060 BFILE = 1
1057 BFILE = 1
1061 context = None
1058 context = None
1062 lr = linereader(fp)
1059 lr = linereader(fp)
1063
1060
1064 while True:
1061 while True:
1065 x = lr.readline()
1062 x = lr.readline()
1066 if not x:
1063 if not x:
1067 break
1064 break
1068 if (state == BFILE and ((not context and x[0] == '@') or
1065 if (state == BFILE and ((not context and x[0] == '@') or
1069 ((context is not False) and x.startswith('***************')))):
1066 ((context is not False) and x.startswith('***************')))):
1070 if context is None and x.startswith('***************'):
1067 if context is None and x.startswith('***************'):
1071 context = True
1068 context = True
1072 gpatch = changed.get(bfile)
1069 gpatch = changed.get(bfile)
1073 create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD'
1070 create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD'
1074 remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE'
1071 remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE'
1075 h = hunk(x, hunknum + 1, lr, context, create, remove)
1072 h = hunk(x, hunknum + 1, lr, context, create, remove)
1076 hunknum += 1
1073 hunknum += 1
1077 if emitfile:
1074 if emitfile:
1078 emitfile = False
1075 emitfile = False
1079 yield 'file', (afile, bfile, h)
1076 yield 'file', (afile, bfile, h)
1080 yield 'hunk', h
1077 yield 'hunk', h
1081 elif state == BFILE and x.startswith('GIT binary patch'):
1078 elif state == BFILE and x.startswith('GIT binary patch'):
1082 h = binhunk(changed[bfile])
1079 h = binhunk(changed[bfile])
1083 hunknum += 1
1080 hunknum += 1
1084 if emitfile:
1081 if emitfile:
1085 emitfile = False
1082 emitfile = False
1086 yield 'file', ('a/' + afile, 'b/' + bfile, h)
1083 yield 'file', ('a/' + afile, 'b/' + bfile, h)
1087 h.extract(lr)
1084 h.extract(lr)
1088 yield 'hunk', h
1085 yield 'hunk', h
1089 elif x.startswith('diff --git'):
1086 elif x.startswith('diff --git'):
1090 # check for git diff, scanning the whole patch file if needed
1087 # check for git diff, scanning the whole patch file if needed
1091 m = gitre.match(x)
1088 m = gitre.match(x)
1092 if m:
1089 if m:
1093 afile, bfile = m.group(1, 2)
1090 afile, bfile = m.group(1, 2)
1094 if not git:
1091 if not git:
1095 git = True
1092 git = True
1096 gitpatches = scangitpatch(lr, x)
1093 gitpatches = scangitpatch(lr, x)
1097 yield 'git', gitpatches
1094 yield 'git', gitpatches
1098 for gp in gitpatches:
1095 for gp in gitpatches:
1099 changed[gp.path] = gp
1096 changed[gp.path] = gp
1100 # else error?
1097 # else error?
1101 # copy/rename + modify should modify target, not source
1098 # copy/rename + modify should modify target, not source
1102 gp = changed.get(bfile)
1099 gp = changed.get(bfile)
1103 if gp and (gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD')
1100 if gp and (gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD')
1104 or gp.mode):
1101 or gp.mode):
1105 afile = bfile
1102 afile = bfile
1106 newfile = True
1103 newfile = True
1107 elif x.startswith('---'):
1104 elif x.startswith('---'):
1108 # check for a unified diff
1105 # check for a unified diff
1109 l2 = lr.readline()
1106 l2 = lr.readline()
1110 if not l2.startswith('+++'):
1107 if not l2.startswith('+++'):
1111 lr.push(l2)
1108 lr.push(l2)
1112 continue
1109 continue
1113 newfile = True
1110 newfile = True
1114 context = False
1111 context = False
1115 afile = parsefilename(x)
1112 afile = parsefilename(x)
1116 bfile = parsefilename(l2)
1113 bfile = parsefilename(l2)
1117 elif x.startswith('***'):
1114 elif x.startswith('***'):
1118 # check for a context diff
1115 # check for a context diff
1119 l2 = lr.readline()
1116 l2 = lr.readline()
1120 if not l2.startswith('---'):
1117 if not l2.startswith('---'):
1121 lr.push(l2)
1118 lr.push(l2)
1122 continue
1119 continue
1123 l3 = lr.readline()
1120 l3 = lr.readline()
1124 lr.push(l3)
1121 lr.push(l3)
1125 if not l3.startswith("***************"):
1122 if not l3.startswith("***************"):
1126 lr.push(l2)
1123 lr.push(l2)
1127 continue
1124 continue
1128 newfile = True
1125 newfile = True
1129 context = True
1126 context = True
1130 afile = parsefilename(x)
1127 afile = parsefilename(x)
1131 bfile = parsefilename(l2)
1128 bfile = parsefilename(l2)
1132
1129
1133 if newfile:
1130 if newfile:
1134 newfile = False
1131 newfile = False
1135 emitfile = True
1132 emitfile = True
1136 state = BFILE
1133 state = BFILE
1137 hunknum = 0
1134 hunknum = 0
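
A hedged sketch of consuming the event stream documented above ('patchtext' is an assumed unified-diff string):

    import cStringIO

    for state, values in iterhunks(cStringIO.StringIO(patchtext)):
        if state == 'file':
            afile, bfile, firsthunk = values   # switch to a new target file
        elif state == 'hunk':
            h = values                         # hunk to apply to the current target
        elif state == 'git':
            gitpatches = values                # copy/rename/mode info for the whole patch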
1138
1135
1139 def applydiff(ui, fp, changed, strip=1, eolmode='strict'):
1136 def applydiff(ui, fp, changed, strip=1, eolmode='strict'):
1140 """Reads a patch from fp and tries to apply it.
1137 """Reads a patch from fp and tries to apply it.
1141
1138
1142 The dict 'changed' is filled in with all of the filenames changed
1139 The dict 'changed' is filled in with all of the filenames changed
1143 by the patch. Returns 0 for a clean patch, -1 if any rejects were
1140 by the patch. Returns 0 for a clean patch, -1 if any rejects were
1144 found and 1 if there was any fuzz.
1141 found and 1 if there was any fuzz.
1145
1142
1146 If 'eolmode' is 'strict', the patch content and patched file are
1143 If 'eolmode' is 'strict', the patch content and patched file are
1147 read in binary mode. Otherwise, line endings are ignored when
1144 read in binary mode. Otherwise, line endings are ignored when
1148 patching, then normalized according to 'eolmode'.
1145 patching, then normalized according to 'eolmode'.
1149
1146
1150 Callers probably want to call '_updatedir' after this to
1147 Callers probably want to call '_updatedir' after this to
1151 apply certain categories of changes not done by this function.
1148 apply certain categories of changes not done by this function.
1152 """
1149 """
1153 return _applydiff(ui, fp, patchfile, copyfile, changed, strip=strip,
1150 return _applydiff(ui, fp, patchfile, copyfile, changed, strip=strip,
1154 eolmode=eolmode)
1151 eolmode=eolmode)
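
A minimal usage sketch for the return-code contract above (assuming 'ui' is a configured ui object and 'fp' an open patch file):

    changed = {}
    ret = applydiff(ui, fp, changed, strip=1, eolmode='strict')
    if ret == -1:
        ui.warn('some hunks were rejected\n')
    elif ret == 1:
        ui.note('patch applied with fuzz\n')
    # 'changed' now holds every filename touched by the patch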
1155
1152
1156 def _applydiff(ui, fp, patcher, copyfn, changed, strip=1, eolmode='strict'):
1153 def _applydiff(ui, fp, patcher, copyfn, changed, strip=1, eolmode='strict'):
1157 rejects = 0
1154 rejects = 0
1158 err = 0
1155 err = 0
1159 current_file = None
1156 current_file = None
1160 cwd = os.getcwd()
1157 cwd = os.getcwd()
1161 backend = fsbackend(ui, scmutil.opener(cwd))
1158 backend = fsbackend(ui, scmutil.opener(cwd))
1162
1159
1163 for state, values in iterhunks(fp):
1160 for state, values in iterhunks(fp):
1164 if state == 'hunk':
1161 if state == 'hunk':
1165 if not current_file:
1162 if not current_file:
1166 continue
1163 continue
1167 ret = current_file.apply(values)
1164 ret = current_file.apply(values)
1168 if ret >= 0:
1165 if ret >= 0:
1169 changed.setdefault(current_file.fname, None)
1166 changed.setdefault(current_file.fname, None)
1170 if ret > 0:
1167 if ret > 0:
1171 err = 1
1168 err = 1
1172 elif state == 'file':
1169 elif state == 'file':
1173 if current_file:
1170 if current_file:
1174 rejects += current_file.close()
1171 rejects += current_file.close()
1175 afile, bfile, first_hunk = values
1172 afile, bfile, first_hunk = values
1176 try:
1173 try:
1177 current_file, missing = selectfile(afile, bfile,
1174 current_file, missing = selectfile(afile, bfile,
1178 first_hunk, strip)
1175 first_hunk, strip)
1179 current_file = patcher(ui, current_file, backend,
1176 current_file = patcher(ui, current_file, backend,
1180 missing=missing, eolmode=eolmode)
1177 missing=missing, eolmode=eolmode)
1181 except PatchError, inst:
1178 except PatchError, inst:
1182 ui.warn(str(inst) + '\n')
1179 ui.warn(str(inst) + '\n')
1183 current_file = None
1180 current_file = None
1184 rejects += 1
1181 rejects += 1
1185 continue
1182 continue
1186 elif state == 'git':
1183 elif state == 'git':
1187 for gp in values:
1184 for gp in values:
1188 gp.path = pathstrip(gp.path, strip - 1)[1]
1185 gp.path = pathstrip(gp.path, strip - 1)[1]
1189 if gp.oldpath:
1186 if gp.oldpath:
1190 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1187 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1191 # Binary patches really overwrite target files, copying them
1188 # Binary patches really overwrite target files, copying them
1192 # will just make it fail with "target file exists"
1189 # will just make it fail with "target file exists"
1193 if gp.op in ('COPY', 'RENAME') and not gp.binary:
1190 if gp.op in ('COPY', 'RENAME') and not gp.binary:
1194 copyfn(gp.oldpath, gp.path, cwd)
1191 copyfn(gp.oldpath, gp.path, cwd)
1195 changed[gp.path] = gp
1192 changed[gp.path] = gp
1196 else:
1193 else:
1197 raise util.Abort(_('unsupported parser state: %s') % state)
1194 raise util.Abort(_('unsupported parser state: %s') % state)
1198
1195
1199 if current_file:
1196 if current_file:
1200 rejects += current_file.close()
1197 rejects += current_file.close()
1201
1198
1202 if rejects:
1199 if rejects:
1203 return -1
1200 return -1
1204 return err
1201 return err
1205
1202
1206 def _updatedir(ui, repo, patches, similarity=0):
1203 def _updatedir(ui, repo, patches, similarity=0):
1207 '''Update dirstate after patch application according to metadata'''
1204 '''Update dirstate after patch application according to metadata'''
1208 if not patches:
1205 if not patches:
1209 return []
1206 return []
1210 copies = []
1207 copies = []
1211 removes = set()
1208 removes = set()
1212 cfiles = patches.keys()
1209 cfiles = patches.keys()
1213 cwd = repo.getcwd()
1210 cwd = repo.getcwd()
1214 if cwd:
1211 if cwd:
1215 cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
1212 cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
1216 for f in patches:
1213 for f in patches:
1217 gp = patches[f]
1214 gp = patches[f]
1218 if not gp:
1215 if not gp:
1219 continue
1216 continue
1220 if gp.op == 'RENAME':
1217 if gp.op == 'RENAME':
1221 copies.append((gp.oldpath, gp.path))
1218 copies.append((gp.oldpath, gp.path))
1222 removes.add(gp.oldpath)
1219 removes.add(gp.oldpath)
1223 elif gp.op == 'COPY':
1220 elif gp.op == 'COPY':
1224 copies.append((gp.oldpath, gp.path))
1221 copies.append((gp.oldpath, gp.path))
1225 elif gp.op == 'DELETE':
1222 elif gp.op == 'DELETE':
1226 removes.add(gp.path)
1223 removes.add(gp.path)
1227
1224
1228 wctx = repo[None]
1225 wctx = repo[None]
1229 for src, dst in copies:
1226 for src, dst in copies:
1230 scmutil.dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
1227 scmutil.dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
1231 if (not similarity) and removes:
1228 if (not similarity) and removes:
1232 wctx.remove(sorted(removes), True)
1229 wctx.remove(sorted(removes), True)
1233
1230
1234 for f in patches:
1231 for f in patches:
1235 gp = patches[f]
1232 gp = patches[f]
1236 if gp and gp.mode:
1233 if gp and gp.mode:
1237 islink, isexec = gp.mode
1234 islink, isexec = gp.mode
1238 dst = repo.wjoin(gp.path)
1235 dst = repo.wjoin(gp.path)
1239 # patch won't create empty files
1236 # patch won't create empty files
1240 if gp.op == 'ADD' and not os.path.lexists(dst):
1237 if gp.op == 'ADD' and not os.path.lexists(dst):
1241 flags = (isexec and 'x' or '') + (islink and 'l' or '')
1238 flags = (isexec and 'x' or '') + (islink and 'l' or '')
1242 repo.wwrite(gp.path, '', flags)
1239 repo.wwrite(gp.path, '', flags)
1243 util.setflags(dst, islink, isexec)
1240 util.setflags(dst, islink, isexec)
1244 scmutil.addremove(repo, cfiles, similarity=similarity)
1241 scmutil.addremove(repo, cfiles, similarity=similarity)
1245 files = patches.keys()
1242 files = patches.keys()
1246 files.extend([r for r in removes if r not in files])
1243 files.extend([r for r in removes if r not in files])
1247 return sorted(files)
1244 return sorted(files)
1248
1245
1249 def _externalpatch(patcher, patchname, ui, strip, cwd, files):
1246 def _externalpatch(patcher, patchname, ui, strip, cwd, files):
1250 """use <patcher> to apply <patchname> to the working directory.
1247 """use <patcher> to apply <patchname> to the working directory.
1251 returns whether patch was applied with fuzz factor."""
1248 returns whether patch was applied with fuzz factor."""
1252
1249
1253 fuzz = False
1250 fuzz = False
1254 args = []
1251 args = []
1255 if cwd:
1252 if cwd:
1256 args.append('-d %s' % util.shellquote(cwd))
1253 args.append('-d %s' % util.shellquote(cwd))
1257 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1254 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1258 util.shellquote(patchname)))
1255 util.shellquote(patchname)))
1259
1256
1260 for line in fp:
1257 for line in fp:
1261 line = line.rstrip()
1258 line = line.rstrip()
1262 ui.note(line + '\n')
1259 ui.note(line + '\n')
1263 if line.startswith('patching file '):
1260 if line.startswith('patching file '):
1264 pf = util.parsepatchoutput(line)
1261 pf = util.parsepatchoutput(line)
1265 printed_file = False
1262 printed_file = False
1266 files.setdefault(pf, None)
1263 files.setdefault(pf, None)
1267 elif line.find('with fuzz') >= 0:
1264 elif line.find('with fuzz') >= 0:
1268 fuzz = True
1265 fuzz = True
1269 if not printed_file:
1266 if not printed_file:
1270 ui.warn(pf + '\n')
1267 ui.warn(pf + '\n')
1271 printed_file = True
1268 printed_file = True
1272 ui.warn(line + '\n')
1269 ui.warn(line + '\n')
1273 elif line.find('saving rejects to file') >= 0:
1270 elif line.find('saving rejects to file') >= 0:
1274 ui.warn(line + '\n')
1271 ui.warn(line + '\n')
1275 elif line.find('FAILED') >= 0:
1272 elif line.find('FAILED') >= 0:
1276 if not printed_file:
1273 if not printed_file:
1277 ui.warn(pf + '\n')
1274 ui.warn(pf + '\n')
1278 printed_file = True
1275 printed_file = True
1279 ui.warn(line + '\n')
1276 ui.warn(line + '\n')
1280 code = fp.close()
1277 code = fp.close()
1281 if code:
1278 if code:
1282 raise PatchError(_("patch command failed: %s") %
1279 raise PatchError(_("patch command failed: %s") %
1283 util.explainexit(code)[0])
1280 util.explainexit(code)[0])
1284 return fuzz
1281 return fuzz
1285
1282
1286 def internalpatch(ui, repo, patchobj, strip, cwd, files=None, eolmode='strict',
1283 def internalpatch(ui, repo, patchobj, strip, cwd, files=None, eolmode='strict',
1287 similarity=0):
1284 similarity=0):
1288 """use builtin patch to apply <patchobj> to the working directory.
1285 """use builtin patch to apply <patchobj> to the working directory.
1289 returns whether patch was applied with fuzz factor."""
1286 returns whether patch was applied with fuzz factor."""
1290
1287
1291 if files is None:
1288 if files is None:
1292 files = {}
1289 files = {}
1293 if eolmode is None:
1290 if eolmode is None:
1294 eolmode = ui.config('patch', 'eol', 'strict')
1291 eolmode = ui.config('patch', 'eol', 'strict')
1295 if eolmode.lower() not in eolmodes:
1292 if eolmode.lower() not in eolmodes:
1296 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1293 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1297 eolmode = eolmode.lower()
1294 eolmode = eolmode.lower()
1298
1295
1299 try:
1296 try:
1300 fp = open(patchobj, 'rb')
1297 fp = open(patchobj, 'rb')
1301 except TypeError:
1298 except TypeError:
1302 fp = patchobj
1299 fp = patchobj
1303 if cwd:
1300 if cwd:
1304 curdir = os.getcwd()
1301 curdir = os.getcwd()
1305 os.chdir(cwd)
1302 os.chdir(cwd)
1306 try:
1303 try:
1307 ret = applydiff(ui, fp, files, strip=strip, eolmode=eolmode)
1304 ret = applydiff(ui, fp, files, strip=strip, eolmode=eolmode)
1308 finally:
1305 finally:
1309 if cwd:
1306 if cwd:
1310 os.chdir(curdir)
1307 os.chdir(curdir)
1311 if fp != patchobj:
1308 if fp != patchobj:
1312 fp.close()
1309 fp.close()
1313 touched = _updatedir(ui, repo, files, similarity)
1310 touched = _updatedir(ui, repo, files, similarity)
1314 files.update(dict.fromkeys(touched))
1311 files.update(dict.fromkeys(touched))
1315 if ret < 0:
1312 if ret < 0:
1316 raise PatchError(_('patch failed to apply'))
1313 raise PatchError(_('patch failed to apply'))
1317 return ret > 0
1314 return ret > 0
1318
1315
1319 def patch(ui, repo, patchname, strip=1, cwd=None, files=None, eolmode='strict',
1316 def patch(ui, repo, patchname, strip=1, cwd=None, files=None, eolmode='strict',
1320 similarity=0):
1317 similarity=0):
1321 """Apply <patchname> to the working directory.
1318 """Apply <patchname> to the working directory.
1322
1319
1323 'eolmode' specifies how end of lines should be handled. It can be:
1320 'eolmode' specifies how end of lines should be handled. It can be:
1324 - 'strict': inputs are read in binary mode, EOLs are preserved
1321 - 'strict': inputs are read in binary mode, EOLs are preserved
1325 - 'crlf': EOLs are ignored when patching and reset to CRLF
1322 - 'crlf': EOLs are ignored when patching and reset to CRLF
1326 - 'lf': EOLs are ignored when patching and reset to LF
1323 - 'lf': EOLs are ignored when patching and reset to LF
1327 - None: get it from user settings, default to 'strict'
1324 - None: get it from user settings, default to 'strict'
1328 'eolmode' is ignored when using an external patcher program.
1325 'eolmode' is ignored when using an external patcher program.
1329
1326
1330 Returns whether patch was applied with fuzz factor.
1327 Returns whether patch was applied with fuzz factor.
1331 """
1328 """
1332 patcher = ui.config('ui', 'patch')
1329 patcher = ui.config('ui', 'patch')
1333 if files is None:
1330 if files is None:
1334 files = {}
1331 files = {}
1335 try:
1332 try:
1336 if patcher:
1333 if patcher:
1337 try:
1334 try:
1338 return _externalpatch(patcher, patchname, ui, strip, cwd,
1335 return _externalpatch(patcher, patchname, ui, strip, cwd,
1339 files)
1336 files)
1340 finally:
1337 finally:
1341 touched = _updatedir(ui, repo, files, similarity)
1338 touched = _updatedir(ui, repo, files, similarity)
1342 files.update(dict.fromkeys(touched))
1339 files.update(dict.fromkeys(touched))
1343 return internalpatch(ui, repo, patchname, strip, cwd, files, eolmode,
1340 return internalpatch(ui, repo, patchname, strip, cwd, files, eolmode,
1344 similarity)
1341 similarity)
1345 except PatchError, err:
1342 except PatchError, err:
1346 raise util.Abort(str(err))
1343 raise util.Abort(str(err))
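
An end-to-end sketch of the public entry point (assuming live 'ui' and 'repo' objects and an on-disk 'fix.patch'):

    files = {}
    fuzz = patch(ui, repo, 'fix.patch', strip=1, files=files, eolmode=None)
    if fuzz:
        ui.warn('patch applied with fuzz\n')
    # 'files' now maps touched paths to their git metadata (or None)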
1347
1344
1348 def changedfiles(patchpath, strip=1):
1345 def changedfiles(patchpath, strip=1):
1349 fp = open(patchpath, 'rb')
1346 fp = open(patchpath, 'rb')
1350 try:
1347 try:
1351 changed = set()
1348 changed = set()
1352 for state, values in iterhunks(fp):
1349 for state, values in iterhunks(fp):
1353 if state == 'hunk':
1350 if state == 'hunk':
1354 continue
1351 continue
1355 elif state == 'file':
1352 elif state == 'file':
1356 afile, bfile, first_hunk = values
1353 afile, bfile, first_hunk = values
1357 current_file, missing = selectfile(afile, bfile,
1354 current_file, missing = selectfile(afile, bfile,
1358 first_hunk, strip)
1355 first_hunk, strip)
1359 changed.add(current_file)
1356 changed.add(current_file)
1360 elif state == 'git':
1357 elif state == 'git':
1361 for gp in values:
1358 for gp in values:
1362 gp.path = pathstrip(gp.path, strip - 1)[1]
1359 gp.path = pathstrip(gp.path, strip - 1)[1]
1363 changed.add(gp.path)
1360 changed.add(gp.path)
1364 if gp.oldpath:
1361 if gp.oldpath:
1365 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1362 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1366 if gp.op == 'RENAME':
1363 if gp.op == 'RENAME':
1367 changed.add(gp.oldpath)
1364 changed.add(gp.oldpath)
1368 else:
1365 else:
1369 raise util.Abort(_('unsupported parser state: %s') % state)
1366 raise util.Abort(_('unsupported parser state: %s') % state)
1370 return changed
1367 return changed
1371 finally:
1368 finally:
1372 fp.close()
1369 fp.close()
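
For example (illustrative; the path is hypothetical), previewing which files a patch would touch without applying it:

    for f in sorted(changedfiles('/tmp/fix.patch', strip=1)):
        print f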
1373
1370
1374 def b85diff(to, tn):
1371 def b85diff(to, tn):
1375 '''print base85-encoded binary diff'''
1372 '''print base85-encoded binary diff'''
1376 def gitindex(text):
1373 def gitindex(text):
1377 if not text:
1374 if not text:
1378 return hex(nullid)
1375 return hex(nullid)
1379 l = len(text)
1376 l = len(text)
1380 s = util.sha1('blob %d\0' % l)
1377 s = util.sha1('blob %d\0' % l)
1381 s.update(text)
1378 s.update(text)
1382 return s.hexdigest()
1379 return s.hexdigest()
1383
1380
1384 def fmtline(line):
1381 def fmtline(line):
1385 l = len(line)
1382 l = len(line)
1386 if l <= 26:
1383 if l <= 26:
1387 l = chr(ord('A') + l - 1)
1384 l = chr(ord('A') + l - 1)
1388 else:
1385 else:
1389 l = chr(l - 26 + ord('a') - 1)
1386 l = chr(l - 26 + ord('a') - 1)
1390 return '%c%s\n' % (l, base85.b85encode(line, True))
1387 return '%c%s\n' % (l, base85.b85encode(line, True))
1391
1388
1392 def chunk(text, csize=52):
1389 def chunk(text, csize=52):
1393 l = len(text)
1390 l = len(text)
1394 i = 0
1391 i = 0
1395 while i < l:
1392 while i < l:
1396 yield text[i:i + csize]
1393 yield text[i:i + csize]
1397 i += csize
1394 i += csize
1398
1395
1399 tohash = gitindex(to)
1396 tohash = gitindex(to)
1400 tnhash = gitindex(tn)
1397 tnhash = gitindex(tn)
1401 if tohash == tnhash:
1398 if tohash == tnhash:
1402 return ""
1399 return ""
1403
1400
1404 # TODO: deltas
1401 # TODO: deltas
1405 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1402 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1406 (tohash, tnhash, len(tn))]
1403 (tohash, tnhash, len(tn))]
1407 for l in chunk(zlib.compress(tn)):
1404 for l in chunk(zlib.compress(tn)):
1408 ret.append(fmtline(l))
1405 ret.append(fmtline(l))
1409 ret.append('\n')
1406 ret.append('\n')
1410 return ''.join(ret)
1407 return ''.join(ret)
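
The length prefix emitted by fmtline() above can be checked by hand (a small illustration, not part of the source): chunk lengths 1..26 map to 'A'..'Z' and 27..52 to 'a'..'z'.

    assert chr(ord('A') + 26 - 1) == 'Z'       # a 26-byte chunk is prefixed 'Z'
    assert chr(52 - 26 + ord('a') - 1) == 'z'  # a full 52-byte chunk is prefixed 'z'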
1411
1408
1412 class GitDiffRequired(Exception):
1409 class GitDiffRequired(Exception):
1413 pass
1410 pass
1414
1411
1415 def diffopts(ui, opts=None, untrusted=False):
1412 def diffopts(ui, opts=None, untrusted=False):
1416 def get(key, name=None, getter=ui.configbool):
1413 def get(key, name=None, getter=ui.configbool):
1417 return ((opts and opts.get(key)) or
1414 return ((opts and opts.get(key)) or
1418 getter('diff', name or key, None, untrusted=untrusted))
1415 getter('diff', name or key, None, untrusted=untrusted))
1419 return mdiff.diffopts(
1416 return mdiff.diffopts(
1420 text=opts and opts.get('text'),
1417 text=opts and opts.get('text'),
1421 git=get('git'),
1418 git=get('git'),
1422 nodates=get('nodates'),
1419 nodates=get('nodates'),
1423 showfunc=get('show_function', 'showfunc'),
1420 showfunc=get('show_function', 'showfunc'),
1424 ignorews=get('ignore_all_space', 'ignorews'),
1421 ignorews=get('ignore_all_space', 'ignorews'),
1425 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1422 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1426 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1423 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1427 context=get('unified', getter=ui.config))
1424 context=get('unified', getter=ui.config))
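
A small sketch of the precedence implemented by get() above (assuming 'ui' is a ui object; the dict mimics parsed command-line flags): explicit opts win, and everything else falls back to the [diff] configuration section.

    opts = diffopts(ui, {'git': True, 'unified': '5'})
    # opts.git is True from the dict; an unset key such as 'nodates' still
    # comes from ui.configbool('diff', 'nodates')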
1428
1425
1429 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1426 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1430 losedatafn=None, prefix=''):
1427 losedatafn=None, prefix=''):
1431 '''yields diff of changes to files between two nodes, or node and
1428 '''yields diff of changes to files between two nodes, or node and
1432 working directory.
1429 working directory.
1433
1430
1434 if node1 is None, use first dirstate parent instead.
1431 if node1 is None, use first dirstate parent instead.
1435 if node2 is None, compare node1 with working directory.
1432 if node2 is None, compare node1 with working directory.
1436
1433
1437 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1434 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1438 every time some change cannot be represented with the current
1435 every time some change cannot be represented with the current
1439 patch format. Return False to upgrade to git patch format, True to
1436 patch format. Return False to upgrade to git patch format, True to
1440 accept the loss or raise an exception to abort the diff. It is
1437 accept the loss or raise an exception to abort the diff. It is
1441 called with the name of the current file being diffed as 'fn'. If set
1438 called with the name of the current file being diffed as 'fn'. If set
1442 to None, patches will always be upgraded to git format when
1439 to None, patches will always be upgraded to git format when
1443 necessary.
1440 necessary.
1444
1441
1445 prefix is a filename prefix that is prepended to all filenames on
1442 prefix is a filename prefix that is prepended to all filenames on
1446 display (used for subrepos).
1443 display (used for subrepos).
1447 '''
1444 '''
1448
1445
1449 if opts is None:
1446 if opts is None:
1450 opts = mdiff.defaultopts
1447 opts = mdiff.defaultopts
1451
1448
1452 if not node1 and not node2:
1449 if not node1 and not node2:
1453 node1 = repo.dirstate.p1()
1450 node1 = repo.dirstate.p1()
1454
1451
1455 def lrugetfilectx():
1452 def lrugetfilectx():
1456 cache = {}
1453 cache = {}
1457 order = []
1454 order = []
1458 def getfilectx(f, ctx):
1455 def getfilectx(f, ctx):
1459 fctx = ctx.filectx(f, filelog=cache.get(f))
1456 fctx = ctx.filectx(f, filelog=cache.get(f))
1460 if f not in cache:
1457 if f not in cache:
1461 if len(cache) > 20:
1458 if len(cache) > 20:
1462 del cache[order.pop(0)]
1459 del cache[order.pop(0)]
1463 cache[f] = fctx.filelog()
1460 cache[f] = fctx.filelog()
1464 else:
1461 else:
1465 order.remove(f)
1462 order.remove(f)
1466 order.append(f)
1463 order.append(f)
1467 return fctx
1464 return fctx
1468 return getfilectx
1465 return getfilectx
1469 getfilectx = lrugetfilectx()
1466 getfilectx = lrugetfilectx()
1470
1467
1471 ctx1 = repo[node1]
1468 ctx1 = repo[node1]
1472 ctx2 = repo[node2]
1469 ctx2 = repo[node2]
1473
1470
1474 if not changes:
1471 if not changes:
1475 changes = repo.status(ctx1, ctx2, match=match)
1472 changes = repo.status(ctx1, ctx2, match=match)
1476 modified, added, removed = changes[:3]
1473 modified, added, removed = changes[:3]
1477
1474
1478 if not modified and not added and not removed:
1475 if not modified and not added and not removed:
1479 return []
1476 return []
1480
1477
1481 revs = None
1478 revs = None
1482 if not repo.ui.quiet:
1479 if not repo.ui.quiet:
1483 hexfunc = repo.ui.debugflag and hex or short
1480 hexfunc = repo.ui.debugflag and hex or short
1484 revs = [hexfunc(node) for node in [node1, node2] if node]
1481 revs = [hexfunc(node) for node in [node1, node2] if node]
1485
1482
1486 copy = {}
1483 copy = {}
1487 if opts.git or opts.upgrade:
1484 if opts.git or opts.upgrade:
1488 copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]
1485 copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]
1489
1486
1490 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1487 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1491 modified, added, removed, copy, getfilectx, opts, losedata, prefix)
1488 modified, added, removed, copy, getfilectx, opts, losedata, prefix)
1492 if opts.upgrade and not opts.git:
1489 if opts.upgrade and not opts.git:
1493 try:
1490 try:
1494 def losedata(fn):
1491 def losedata(fn):
1495 if not losedatafn or not losedatafn(fn=fn):
1492 if not losedatafn or not losedatafn(fn=fn):
1496 raise GitDiffRequired()
1493 raise GitDiffRequired()
1497 # Buffer the whole output until we are sure it can be generated
1494 # Buffer the whole output until we are sure it can be generated
1498 return list(difffn(opts.copy(git=False), losedata))
1495 return list(difffn(opts.copy(git=False), losedata))
1499 except GitDiffRequired:
1496 except GitDiffRequired:
1500 return difffn(opts.copy(git=True), None)
1497 return difffn(opts.copy(git=True), None)
1501 else:
1498 else:
1502 return difffn(opts, None)
1499 return difffn(opts, None)
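
A minimal consumption sketch (assuming live 'ui' and 'repo' objects): the generator yields plain text chunks that can be written out directly.

    for chunk in diff(repo, node1=None, node2=None,
                      opts=diffopts(ui, {'git': True})):
        ui.write(chunk)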
1503
1500
1504 def difflabel(func, *args, **kw):
1501 def difflabel(func, *args, **kw):
1505 '''yields 2-tuples of (output, label) based on the output of func()'''
1502 '''yields 2-tuples of (output, label) based on the output of func()'''
1506 prefixes = [('diff', 'diff.diffline'),
1503 prefixes = [('diff', 'diff.diffline'),
1507 ('copy', 'diff.extended'),
1504 ('copy', 'diff.extended'),
1508 ('rename', 'diff.extended'),
1505 ('rename', 'diff.extended'),
1509 ('old', 'diff.extended'),
1506 ('old', 'diff.extended'),
1510 ('new', 'diff.extended'),
1507 ('new', 'diff.extended'),
1511 ('deleted', 'diff.extended'),
1508 ('deleted', 'diff.extended'),
1512 ('---', 'diff.file_a'),
1509 ('---', 'diff.file_a'),
1513 ('+++', 'diff.file_b'),
1510 ('+++', 'diff.file_b'),
1514 ('@@', 'diff.hunk'),
1511 ('@@', 'diff.hunk'),
1515 ('-', 'diff.deleted'),
1512 ('-', 'diff.deleted'),
1516 ('+', 'diff.inserted')]
1513 ('+', 'diff.inserted')]
1517
1514
1518 for chunk in func(*args, **kw):
1515 for chunk in func(*args, **kw):
1519 lines = chunk.split('\n')
1516 lines = chunk.split('\n')
1520 for i, line in enumerate(lines):
1517 for i, line in enumerate(lines):
1521 if i != 0:
1518 if i != 0:
1522 yield ('\n', '')
1519 yield ('\n', '')
1523 stripline = line
1520 stripline = line
1524 if line and line[0] in '+-':
1521 if line and line[0] in '+-':
1525 # highlight trailing whitespace, but only in changed lines
1522 # highlight trailing whitespace, but only in changed lines
1526 stripline = line.rstrip()
1523 stripline = line.rstrip()
1527 for prefix, label in prefixes:
1524 for prefix, label in prefixes:
1528 if stripline.startswith(prefix):
1525 if stripline.startswith(prefix):
1529 yield (stripline, label)
1526 yield (stripline, label)
1530 break
1527 break
1531 else:
1528 else:
1532 yield (line, '')
1529 yield (line, '')
1533 if line != stripline:
1530 if line != stripline:
1534 yield (line[len(stripline):], 'diff.trailingwhitespace')
1531 yield (line[len(stripline):], 'diff.trailingwhitespace')
1535
1532
1536 def diffui(*args, **kw):
1533 def diffui(*args, **kw):
1537 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1534 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1538 return difflabel(diff, *args, **kw)
1535 return difflabel(diff, *args, **kw)
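
diffui() pairs every chunk with a label so the caller can colorize output; a hedged sketch (assuming ui.write accepts the 'label' keyword used by Mercurial's color support):

    for chunk, label in diffui(repo, opts=diffopts(ui, {'git': True})):
        ui.write(chunk, label=label)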
1539
1536
1540
1537
1541 def _addmodehdr(header, omode, nmode):
1538 def _addmodehdr(header, omode, nmode):
1542 if omode != nmode:
1539 if omode != nmode:
1543 header.append('old mode %s\n' % omode)
1540 header.append('old mode %s\n' % omode)
1544 header.append('new mode %s\n' % nmode)
1541 header.append('new mode %s\n' % nmode)
1545
1542
1546 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1543 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1547 copy, getfilectx, opts, losedatafn, prefix):
1544 copy, getfilectx, opts, losedatafn, prefix):
1548
1545
1549 def join(f):
1546 def join(f):
1550 return os.path.join(prefix, f)
1547 return os.path.join(prefix, f)
1551
1548
1552 date1 = util.datestr(ctx1.date())
1549 date1 = util.datestr(ctx1.date())
1553 man1 = ctx1.manifest()
1550 man1 = ctx1.manifest()
1554
1551
1555 gone = set()
1552 gone = set()
1556 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1553 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1557
1554
1558 copyto = dict([(v, k) for k, v in copy.items()])
1555 copyto = dict([(v, k) for k, v in copy.items()])
1559
1556
1560 if opts.git:
1557 if opts.git:
1561 revs = None
1558 revs = None
1562
1559
1563 for f in sorted(modified + added + removed):
1560 for f in sorted(modified + added + removed):
1564 to = None
1561 to = None
1565 tn = None
1562 tn = None
1566 dodiff = True
1563 dodiff = True
1567 header = []
1564 header = []
1568 if f in man1:
1565 if f in man1:
1569 to = getfilectx(f, ctx1).data()
1566 to = getfilectx(f, ctx1).data()
1570 if f not in removed:
1567 if f not in removed:
1571 tn = getfilectx(f, ctx2).data()
1568 tn = getfilectx(f, ctx2).data()
1572 a, b = f, f
1569 a, b = f, f
1573 if opts.git or losedatafn:
1570 if opts.git or losedatafn:
1574 if f in added:
1571 if f in added:
1575 mode = gitmode[ctx2.flags(f)]
1572 mode = gitmode[ctx2.flags(f)]
1576 if f in copy or f in copyto:
1573 if f in copy or f in copyto:
1577 if opts.git:
1574 if opts.git:
1578 if f in copy:
1575 if f in copy:
1579 a = copy[f]
1576 a = copy[f]
1580 else:
1577 else:
1581 a = copyto[f]
1578 a = copyto[f]
1582 omode = gitmode[man1.flags(a)]
1579 omode = gitmode[man1.flags(a)]
1583 _addmodehdr(header, omode, mode)
1580 _addmodehdr(header, omode, mode)
1584 if a in removed and a not in gone:
1581 if a in removed and a not in gone:
1585 op = 'rename'
1582 op = 'rename'
1586 gone.add(a)
1583 gone.add(a)
1587 else:
1584 else:
1588 op = 'copy'
1585 op = 'copy'
1589 header.append('%s from %s\n' % (op, join(a)))
1586 header.append('%s from %s\n' % (op, join(a)))
1590 header.append('%s to %s\n' % (op, join(f)))
1587 header.append('%s to %s\n' % (op, join(f)))
1591 to = getfilectx(a, ctx1).data()
1588 to = getfilectx(a, ctx1).data()
1592 else:
1589 else:
1593 losedatafn(f)
1590 losedatafn(f)
1594 else:
1591 else:
1595 if opts.git:
1592 if opts.git:
1596 header.append('new file mode %s\n' % mode)
1593 header.append('new file mode %s\n' % mode)
1597 elif ctx2.flags(f):
1594 elif ctx2.flags(f):
1598 losedatafn(f)
1595 losedatafn(f)
1599 # In theory, if tn was copied or renamed we should check
1596 # In theory, if tn was copied or renamed we should check
1600 # if the source is binary too but the copy record already
1597 # if the source is binary too but the copy record already
1601 # forces git mode.
1598 # forces git mode.
1602 if util.binary(tn):
1599 if util.binary(tn):
1603 if opts.git:
1600 if opts.git:
1604 dodiff = 'binary'
1601 dodiff = 'binary'
1605 else:
1602 else:
1606 losedatafn(f)
1603 losedatafn(f)
1607 if not opts.git and not tn:
1604 if not opts.git and not tn:
1608 # regular diffs cannot represent new empty file
1605 # regular diffs cannot represent new empty file
1609 losedatafn(f)
1606 losedatafn(f)
1610 elif f in removed:
1607 elif f in removed:
1611 if opts.git:
1608 if opts.git:
1612 # have we already reported a copy above?
1609 # have we already reported a copy above?
1613 if ((f in copy and copy[f] in added
1610 if ((f in copy and copy[f] in added
1614 and copyto[copy[f]] == f) or
1611 and copyto[copy[f]] == f) or
1615 (f in copyto and copyto[f] in added
1612 (f in copyto and copyto[f] in added
1616 and copy[copyto[f]] == f)):
1613 and copy[copyto[f]] == f)):
1617 dodiff = False
1614 dodiff = False
1618 else:
1615 else:
1619 header.append('deleted file mode %s\n' %
1616 header.append('deleted file mode %s\n' %
1620 gitmode[man1.flags(f)])
1617 gitmode[man1.flags(f)])
1621 elif not to or util.binary(to):
1618 elif not to or util.binary(to):
1622 # regular diffs cannot represent empty file deletion
1619 # regular diffs cannot represent empty file deletion
1623 losedatafn(f)
1620 losedatafn(f)
1624 else:
1621 else:
1625 oflag = man1.flags(f)
1622 oflag = man1.flags(f)
1626 nflag = ctx2.flags(f)
1623 nflag = ctx2.flags(f)
1627 binary = util.binary(to) or util.binary(tn)
1624 binary = util.binary(to) or util.binary(tn)
1628 if opts.git:
1625 if opts.git:
1629 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1626 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1630 if binary:
1627 if binary:
1631 dodiff = 'binary'
1628 dodiff = 'binary'
1632 elif binary or nflag != oflag:
1629 elif binary or nflag != oflag:
1633 losedatafn(f)
1630 losedatafn(f)
1634 if opts.git:
1631 if opts.git:
1635 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1632 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1636
1633
1637 if dodiff:
1634 if dodiff:
1638 if dodiff == 'binary':
1635 if dodiff == 'binary':
1639 text = b85diff(to, tn)
1636 text = b85diff(to, tn)
1640 else:
1637 else:
1641 text = mdiff.unidiff(to, date1,
1638 text = mdiff.unidiff(to, date1,
1642 # ctx2 date may be dynamic
1639 # ctx2 date may be dynamic
1643 tn, util.datestr(ctx2.date()),
1640 tn, util.datestr(ctx2.date()),
1644 join(a), join(b), revs, opts=opts)
1641 join(a), join(b), revs, opts=opts)
1645 if header and (text or len(header) > 1):
1642 if header and (text or len(header) > 1):
1646 yield ''.join(header)
1643 yield ''.join(header)
1647 if text:
1644 if text:
1648 yield text
1645 yield text
1649
1646
1650 def diffstatdata(lines):
1647 def diffstatdata(lines):
1651 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1648 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1652
1649
1653 filename, adds, removes = None, 0, 0
1650 filename, adds, removes = None, 0, 0
1654 for line in lines:
1651 for line in lines:
1655 if line.startswith('diff'):
1652 if line.startswith('diff'):
1656 if filename:
1653 if filename:
1657 isbinary = adds == 0 and removes == 0
1654 isbinary = adds == 0 and removes == 0
1658 yield (filename, adds, removes, isbinary)
1655 yield (filename, adds, removes, isbinary)
1659 # set numbers to 0 anyway when starting a new file
1656 # set numbers to 0 anyway when starting a new file
1660 adds, removes = 0, 0
1657 adds, removes = 0, 0
1661 if line.startswith('diff --git'):
1658 if line.startswith('diff --git'):
1662 filename = gitre.search(line).group(1)
1659 filename = gitre.search(line).group(1)
1663 elif line.startswith('diff -r'):
1660 elif line.startswith('diff -r'):
1664 # format: "diff -r ... -r ... filename"
1661 # format: "diff -r ... -r ... filename"
1665 filename = diffre.search(line).group(1)
1662 filename = diffre.search(line).group(1)
1666 elif line.startswith('+') and not line.startswith('+++'):
1663 elif line.startswith('+') and not line.startswith('+++'):
1667 adds += 1
1664 adds += 1
1668 elif line.startswith('-') and not line.startswith('---'):
1665 elif line.startswith('-') and not line.startswith('---'):
1669 removes += 1
1666 removes += 1
1670 if filename:
1667 if filename:
1671 isbinary = adds == 0 and removes == 0
1668 isbinary = adds == 0 and removes == 0
1672 yield (filename, adds, removes, isbinary)
1669 yield (filename, adds, removes, isbinary)
1673
1670
1674 def diffstat(lines, width=80, git=False):
1671 def diffstat(lines, width=80, git=False):
1675 output = []
1672 output = []
1676 stats = list(diffstatdata(lines))
1673 stats = list(diffstatdata(lines))
1677
1674
1678 maxtotal, maxname = 0, 0
1675 maxtotal, maxname = 0, 0
1679 totaladds, totalremoves = 0, 0
1676 totaladds, totalremoves = 0, 0
1680 hasbinary = False
1677 hasbinary = False
1681
1678
1682 sized = [(filename, adds, removes, isbinary, encoding.colwidth(filename))
1679 sized = [(filename, adds, removes, isbinary, encoding.colwidth(filename))
1683 for filename, adds, removes, isbinary in stats]
1680 for filename, adds, removes, isbinary in stats]
1684
1681
1685 for filename, adds, removes, isbinary, namewidth in sized:
1682 for filename, adds, removes, isbinary, namewidth in sized:
1686 totaladds += adds
1683 totaladds += adds
1687 totalremoves += removes
1684 totalremoves += removes
1688 maxname = max(maxname, namewidth)
1685 maxname = max(maxname, namewidth)
1689 maxtotal = max(maxtotal, adds + removes)
1686 maxtotal = max(maxtotal, adds + removes)
1690 if isbinary:
1687 if isbinary:
1691 hasbinary = True
1688 hasbinary = True
1692
1689
1693 countwidth = len(str(maxtotal))
1690 countwidth = len(str(maxtotal))
1694 if hasbinary and countwidth < 3:
1691 if hasbinary and countwidth < 3:
1695 countwidth = 3
1692 countwidth = 3
1696 graphwidth = width - countwidth - maxname - 6
1693 graphwidth = width - countwidth - maxname - 6
1697 if graphwidth < 10:
1694 if graphwidth < 10:
1698 graphwidth = 10
1695 graphwidth = 10
1699
1696
1700 def scale(i):
1697 def scale(i):
1701 if maxtotal <= graphwidth:
1698 if maxtotal <= graphwidth:
1702 return i
1699 return i
1703 # If diffstat runs out of room, it doesn't print anything,
1700 # If diffstat runs out of room, it doesn't print anything,
1704 # which isn't very useful, so always print at least one + or -
1701 # which isn't very useful, so always print at least one + or -
1705 # if there were at least some changes.
1702 # if there were at least some changes.
1706 return max(i * graphwidth // maxtotal, int(bool(i)))
1703 return max(i * graphwidth // maxtotal, int(bool(i)))
1707
1704
1708 for filename, adds, removes, isbinary, namewidth in sized:
1705 for filename, adds, removes, isbinary, namewidth in sized:
1709 if git and isbinary:
1706 if git and isbinary:
1710 count = 'Bin'
1707 count = 'Bin'
1711 else:
1708 else:
1712 count = adds + removes
1709 count = adds + removes
1713 pluses = '+' * scale(adds)
1710 pluses = '+' * scale(adds)
1714 minuses = '-' * scale(removes)
1711 minuses = '-' * scale(removes)
1715 output.append(' %s%s | %*s %s%s\n' %
1712 output.append(' %s%s | %*s %s%s\n' %
1716 (filename, ' ' * (maxname - namewidth),
1713 (filename, ' ' * (maxname - namewidth),
1717 countwidth, count,
1714 countwidth, count,
1718 pluses, minuses))
1715 pluses, minuses))
1719
1716
1720 if stats:
1717 if stats:
1721 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1718 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1722 % (len(stats), totaladds, totalremoves))
1719 % (len(stats), totaladds, totalremoves))
1723
1720
1724 return ''.join(output)
1721 return ''.join(output)
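
A short sketch tying diff() and diffstat() together (assuming live 'ui' and 'repo'; the chunks are joined and re-split so diffstatdata() sees individual lines):

    chunks = diff(repo, opts=diffopts(ui, {'git': True}))
    lines = ''.join(chunks).splitlines(True)
    ui.write(diffstat(lines, width=78, git=True))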
1725
1722
1726 def diffstatui(*args, **kw):
1723 def diffstatui(*args, **kw):
1727 '''like diffstat(), but yields 2-tuples of (output, label) for
1724 '''like diffstat(), but yields 2-tuples of (output, label) for
1728 ui.write()
1725 ui.write()
1729 '''
1726 '''
1730
1727
1731 for line in diffstat(*args, **kw).splitlines():
1728 for line in diffstat(*args, **kw).splitlines():
1732 if line and line[-1] in '+-':
1729 if line and line[-1] in '+-':
1733 name, graph = line.rsplit(' ', 1)
1730 name, graph = line.rsplit(' ', 1)
1734 yield (name + ' ', '')
1731 yield (name + ' ', '')
1735 m = re.search(r'\++', graph)
1732 m = re.search(r'\++', graph)
1736 if m:
1733 if m:
1737 yield (m.group(0), 'diffstat.inserted')
1734 yield (m.group(0), 'diffstat.inserted')
1738 m = re.search(r'-+', graph)
1735 m = re.search(r'-+', graph)
1739 if m:
1736 if m:
1740 yield (m.group(0), 'diffstat.deleted')
1737 yield (m.group(0), 'diffstat.deleted')
1741 else:
1738 else:
1742 yield (line, '')
1739 yield (line, '')
1743 yield ('\n', '')
1740 yield ('\n', '')