patch: move copyfile() into backends, abstract basedir
Patrick Mezard
r14350:00da6624 default
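This changeset removes the module-level copyfile(src, dst, basedir) helper and moves the behaviour onto the patching backends: abstractbackend gains an abstract copy(src, dst) method, and fsbackend is now constructed with a basedir (from which it builds its own scmutil.opener) and implements copy() against it, so callers no longer thread a base directory through every call. The standalone sketch below mirrors that shape using plain os/shutil stand-ins; it is illustrative only and not Mercurial's actual API (the real fsbackend also takes a ui object and uses scmutil.canonpath and util.copyfile, as the diff shows).

import os
import shutil

class abstractbackend(object):
    def copy(self, src, dst):
        """Copy src into dst, creating intermediate directories if needed.
        Paths are relative to the patching base directory."""
        raise NotImplementedError

class fsbackend(abstractbackend):
    def __init__(self, basedir):
        # the backend, not each caller, now owns the base directory
        self.basedir = basedir

    def _join(self, fname):
        return os.path.join(self.basedir, fname)

    def copy(self, src, dst):
        abssrc, absdst = self._join(src), self._join(dst)
        if os.path.lexists(absdst):
            raise OSError("cannot create %s: destination already exists" % dst)
        dstdir = os.path.dirname(absdst)
        if dstdir and not os.path.isdir(dstdir):
            os.makedirs(dstdir)
        shutil.copyfile(abssrc, absdst)

# before this change: copyfile('a/from.txt', 'a/to.txt', basedir)
# after this change:  fsbackend(basedir).copy('a/from.txt', 'a/to.txt')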
@@ -1,1740 +1,1745 @@
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email.Parser, os, errno, re
9 import cStringIO, email.Parser, os, errno, re
10 import tempfile, zlib
10 import tempfile, zlib
11
11
12 from i18n import _
12 from i18n import _
13 from node import hex, nullid, short
13 from node import hex, nullid, short
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding
15
15
16 gitre = re.compile('diff --git a/(.*) b/(.*)')
16 gitre = re.compile('diff --git a/(.*) b/(.*)')
17
17
18 class PatchError(Exception):
18 class PatchError(Exception):
19 pass
19 pass
20
20
21 # helper functions
22
23 def copyfile(src, dst, basedir):
24 abssrc, absdst = [scmutil.canonpath(basedir, basedir, x)
25 for x in [src, dst]]
26 if os.path.lexists(absdst):
27 raise util.Abort(_("cannot create %s: destination already exists") %
28 dst)
29
30 dstdir = os.path.dirname(absdst)
31 if dstdir and not os.path.isdir(dstdir):
32 try:
33 os.makedirs(dstdir)
34 except IOError:
35 raise util.Abort(
36 _("cannot create %s: unable to create destination directory")
37 % dst)
38
39 util.copyfile(abssrc, absdst)
40
21
41 # public functions
22 # public functions
42
23
43 def split(stream):
24 def split(stream):
44 '''return an iterator of individual patches from a stream'''
25 '''return an iterator of individual patches from a stream'''
45 def isheader(line, inheader):
26 def isheader(line, inheader):
46 if inheader and line[0] in (' ', '\t'):
27 if inheader and line[0] in (' ', '\t'):
47 # continuation
28 # continuation
48 return True
29 return True
49 if line[0] in (' ', '-', '+'):
30 if line[0] in (' ', '-', '+'):
50 # diff line - don't check for header pattern in there
31 # diff line - don't check for header pattern in there
51 return False
32 return False
52 l = line.split(': ', 1)
33 l = line.split(': ', 1)
53 return len(l) == 2 and ' ' not in l[0]
34 return len(l) == 2 and ' ' not in l[0]
54
35
55 def chunk(lines):
36 def chunk(lines):
56 return cStringIO.StringIO(''.join(lines))
37 return cStringIO.StringIO(''.join(lines))
57
38
58 def hgsplit(stream, cur):
39 def hgsplit(stream, cur):
59 inheader = True
40 inheader = True
60
41
61 for line in stream:
42 for line in stream:
62 if not line.strip():
43 if not line.strip():
63 inheader = False
44 inheader = False
64 if not inheader and line.startswith('# HG changeset patch'):
45 if not inheader and line.startswith('# HG changeset patch'):
65 yield chunk(cur)
46 yield chunk(cur)
66 cur = []
47 cur = []
67 inheader = True
48 inheader = True
68
49
69 cur.append(line)
50 cur.append(line)
70
51
71 if cur:
52 if cur:
72 yield chunk(cur)
53 yield chunk(cur)
73
54
74 def mboxsplit(stream, cur):
55 def mboxsplit(stream, cur):
75 for line in stream:
56 for line in stream:
76 if line.startswith('From '):
57 if line.startswith('From '):
77 for c in split(chunk(cur[1:])):
58 for c in split(chunk(cur[1:])):
78 yield c
59 yield c
79 cur = []
60 cur = []
80
61
81 cur.append(line)
62 cur.append(line)
82
63
83 if cur:
64 if cur:
84 for c in split(chunk(cur[1:])):
65 for c in split(chunk(cur[1:])):
85 yield c
66 yield c
86
67
87 def mimesplit(stream, cur):
68 def mimesplit(stream, cur):
88 def msgfp(m):
69 def msgfp(m):
89 fp = cStringIO.StringIO()
70 fp = cStringIO.StringIO()
90 g = email.Generator.Generator(fp, mangle_from_=False)
71 g = email.Generator.Generator(fp, mangle_from_=False)
91 g.flatten(m)
72 g.flatten(m)
92 fp.seek(0)
73 fp.seek(0)
93 return fp
74 return fp
94
75
95 for line in stream:
76 for line in stream:
96 cur.append(line)
77 cur.append(line)
97 c = chunk(cur)
78 c = chunk(cur)
98
79
99 m = email.Parser.Parser().parse(c)
80 m = email.Parser.Parser().parse(c)
100 if not m.is_multipart():
81 if not m.is_multipart():
101 yield msgfp(m)
82 yield msgfp(m)
102 else:
83 else:
103 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
84 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
104 for part in m.walk():
85 for part in m.walk():
105 ct = part.get_content_type()
86 ct = part.get_content_type()
106 if ct not in ok_types:
87 if ct not in ok_types:
107 continue
88 continue
108 yield msgfp(part)
89 yield msgfp(part)
109
90
110 def headersplit(stream, cur):
91 def headersplit(stream, cur):
111 inheader = False
92 inheader = False
112
93
113 for line in stream:
94 for line in stream:
114 if not inheader and isheader(line, inheader):
95 if not inheader and isheader(line, inheader):
115 yield chunk(cur)
96 yield chunk(cur)
116 cur = []
97 cur = []
117 inheader = True
98 inheader = True
118 if inheader and not isheader(line, inheader):
99 if inheader and not isheader(line, inheader):
119 inheader = False
100 inheader = False
120
101
121 cur.append(line)
102 cur.append(line)
122
103
123 if cur:
104 if cur:
124 yield chunk(cur)
105 yield chunk(cur)
125
106
126 def remainder(cur):
107 def remainder(cur):
127 yield chunk(cur)
108 yield chunk(cur)
128
109
129 class fiter(object):
110 class fiter(object):
130 def __init__(self, fp):
111 def __init__(self, fp):
131 self.fp = fp
112 self.fp = fp
132
113
133 def __iter__(self):
114 def __iter__(self):
134 return self
115 return self
135
116
136 def next(self):
117 def next(self):
137 l = self.fp.readline()
118 l = self.fp.readline()
138 if not l:
119 if not l:
139 raise StopIteration
120 raise StopIteration
140 return l
121 return l
141
122
142 inheader = False
123 inheader = False
143 cur = []
124 cur = []
144
125
145 mimeheaders = ['content-type']
126 mimeheaders = ['content-type']
146
127
147 if not hasattr(stream, 'next'):
128 if not hasattr(stream, 'next'):
148 # http responses, for example, have readline but not next
129 # http responses, for example, have readline but not next
149 stream = fiter(stream)
130 stream = fiter(stream)
150
131
151 for line in stream:
132 for line in stream:
152 cur.append(line)
133 cur.append(line)
153 if line.startswith('# HG changeset patch'):
134 if line.startswith('# HG changeset patch'):
154 return hgsplit(stream, cur)
135 return hgsplit(stream, cur)
155 elif line.startswith('From '):
136 elif line.startswith('From '):
156 return mboxsplit(stream, cur)
137 return mboxsplit(stream, cur)
157 elif isheader(line, inheader):
138 elif isheader(line, inheader):
158 inheader = True
139 inheader = True
159 if line.split(':', 1)[0].lower() in mimeheaders:
140 if line.split(':', 1)[0].lower() in mimeheaders:
160 # let email parser handle this
141 # let email parser handle this
161 return mimesplit(stream, cur)
142 return mimesplit(stream, cur)
162 elif line.startswith('--- ') and inheader:
143 elif line.startswith('--- ') and inheader:
163 # No evil headers seen by diff start, split by hand
144 # No evil headers seen by diff start, split by hand
164 return headersplit(stream, cur)
145 return headersplit(stream, cur)
165 # Not enough info, keep reading
146 # Not enough info, keep reading
166
147
167 # if we are here, we have a very plain patch
148 # if we are here, we have a very plain patch
168 return remainder(cur)
149 return remainder(cur)
169
150
170 def extract(ui, fileobj):
151 def extract(ui, fileobj):
171 '''extract patch from data read from fileobj.
152 '''extract patch from data read from fileobj.
172
153
173 patch can be a normal patch or contained in an email message.
154 patch can be a normal patch or contained in an email message.
174
155
175 return tuple (filename, message, user, date, branch, node, p1, p2).
156 return tuple (filename, message, user, date, branch, node, p1, p2).
176 Any item in the returned tuple can be None. If filename is None,
157 Any item in the returned tuple can be None. If filename is None,
177 fileobj did not contain a patch. Caller must unlink filename when done.'''
158 fileobj did not contain a patch. Caller must unlink filename when done.'''
178
159
179 # attempt to detect the start of a patch
160 # attempt to detect the start of a patch
180 # (this heuristic is borrowed from quilt)
161 # (this heuristic is borrowed from quilt)
181 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
162 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
182 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
163 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
183 r'---[ \t].*?^\+\+\+[ \t]|'
164 r'---[ \t].*?^\+\+\+[ \t]|'
184 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
165 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
185
166
186 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
167 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
187 tmpfp = os.fdopen(fd, 'w')
168 tmpfp = os.fdopen(fd, 'w')
188 try:
169 try:
189 msg = email.Parser.Parser().parse(fileobj)
170 msg = email.Parser.Parser().parse(fileobj)
190
171
191 subject = msg['Subject']
172 subject = msg['Subject']
192 user = msg['From']
173 user = msg['From']
193 if not subject and not user:
174 if not subject and not user:
194 # Not an email, restore parsed headers if any
175 # Not an email, restore parsed headers if any
195 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
176 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
196
177
197 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
178 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
198 # should try to parse msg['Date']
179 # should try to parse msg['Date']
199 date = None
180 date = None
200 nodeid = None
181 nodeid = None
201 branch = None
182 branch = None
202 parents = []
183 parents = []
203
184
204 if subject:
185 if subject:
205 if subject.startswith('[PATCH'):
186 if subject.startswith('[PATCH'):
206 pend = subject.find(']')
187 pend = subject.find(']')
207 if pend >= 0:
188 if pend >= 0:
208 subject = subject[pend + 1:].lstrip()
189 subject = subject[pend + 1:].lstrip()
209 subject = subject.replace('\n\t', ' ')
190 subject = subject.replace('\n\t', ' ')
210 ui.debug('Subject: %s\n' % subject)
191 ui.debug('Subject: %s\n' % subject)
211 if user:
192 if user:
212 ui.debug('From: %s\n' % user)
193 ui.debug('From: %s\n' % user)
213 diffs_seen = 0
194 diffs_seen = 0
214 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
195 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
215 message = ''
196 message = ''
216 for part in msg.walk():
197 for part in msg.walk():
217 content_type = part.get_content_type()
198 content_type = part.get_content_type()
218 ui.debug('Content-Type: %s\n' % content_type)
199 ui.debug('Content-Type: %s\n' % content_type)
219 if content_type not in ok_types:
200 if content_type not in ok_types:
220 continue
201 continue
221 payload = part.get_payload(decode=True)
202 payload = part.get_payload(decode=True)
222 m = diffre.search(payload)
203 m = diffre.search(payload)
223 if m:
204 if m:
224 hgpatch = False
205 hgpatch = False
225 hgpatchheader = False
206 hgpatchheader = False
226 ignoretext = False
207 ignoretext = False
227
208
228 ui.debug('found patch at byte %d\n' % m.start(0))
209 ui.debug('found patch at byte %d\n' % m.start(0))
229 diffs_seen += 1
210 diffs_seen += 1
230 cfp = cStringIO.StringIO()
211 cfp = cStringIO.StringIO()
231 for line in payload[:m.start(0)].splitlines():
212 for line in payload[:m.start(0)].splitlines():
232 if line.startswith('# HG changeset patch') and not hgpatch:
213 if line.startswith('# HG changeset patch') and not hgpatch:
233 ui.debug('patch generated by hg export\n')
214 ui.debug('patch generated by hg export\n')
234 hgpatch = True
215 hgpatch = True
235 hgpatchheader = True
216 hgpatchheader = True
236 # drop earlier commit message content
217 # drop earlier commit message content
237 cfp.seek(0)
218 cfp.seek(0)
238 cfp.truncate()
219 cfp.truncate()
239 subject = None
220 subject = None
240 elif hgpatchheader:
221 elif hgpatchheader:
241 if line.startswith('# User '):
222 if line.startswith('# User '):
242 user = line[7:]
223 user = line[7:]
243 ui.debug('From: %s\n' % user)
224 ui.debug('From: %s\n' % user)
244 elif line.startswith("# Date "):
225 elif line.startswith("# Date "):
245 date = line[7:]
226 date = line[7:]
246 elif line.startswith("# Branch "):
227 elif line.startswith("# Branch "):
247 branch = line[9:]
228 branch = line[9:]
248 elif line.startswith("# Node ID "):
229 elif line.startswith("# Node ID "):
249 nodeid = line[10:]
230 nodeid = line[10:]
250 elif line.startswith("# Parent "):
231 elif line.startswith("# Parent "):
251 parents.append(line[10:])
232 parents.append(line[10:])
252 elif not line.startswith("# "):
233 elif not line.startswith("# "):
253 hgpatchheader = False
234 hgpatchheader = False
254 elif line == '---' and gitsendmail:
235 elif line == '---' and gitsendmail:
255 ignoretext = True
236 ignoretext = True
256 if not hgpatchheader and not ignoretext:
237 if not hgpatchheader and not ignoretext:
257 cfp.write(line)
238 cfp.write(line)
258 cfp.write('\n')
239 cfp.write('\n')
259 message = cfp.getvalue()
240 message = cfp.getvalue()
260 if tmpfp:
241 if tmpfp:
261 tmpfp.write(payload)
242 tmpfp.write(payload)
262 if not payload.endswith('\n'):
243 if not payload.endswith('\n'):
263 tmpfp.write('\n')
244 tmpfp.write('\n')
264 elif not diffs_seen and message and content_type == 'text/plain':
245 elif not diffs_seen and message and content_type == 'text/plain':
265 message += '\n' + payload
246 message += '\n' + payload
266 except:
247 except:
267 tmpfp.close()
248 tmpfp.close()
268 os.unlink(tmpname)
249 os.unlink(tmpname)
269 raise
250 raise
270
251
271 if subject and not message.startswith(subject):
252 if subject and not message.startswith(subject):
272 message = '%s\n%s' % (subject, message)
253 message = '%s\n%s' % (subject, message)
273 tmpfp.close()
254 tmpfp.close()
274 if not diffs_seen:
255 if not diffs_seen:
275 os.unlink(tmpname)
256 os.unlink(tmpname)
276 return None, message, user, date, branch, None, None, None
257 return None, message, user, date, branch, None, None, None
277 p1 = parents and parents.pop(0) or None
258 p1 = parents and parents.pop(0) or None
278 p2 = parents and parents.pop(0) or None
259 p2 = parents and parents.pop(0) or None
279 return tmpname, message, user, date, branch, nodeid, p1, p2
260 return tmpname, message, user, date, branch, nodeid, p1, p2
280
261
281 class patchmeta(object):
262 class patchmeta(object):
282 """Patched file metadata
263 """Patched file metadata
283
264
284 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
265 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
285 or COPY. 'path' is patched file path. 'oldpath' is set to the
266 or COPY. 'path' is patched file path. 'oldpath' is set to the
286 origin file when 'op' is either COPY or RENAME, None otherwise. If
267 origin file when 'op' is either COPY or RENAME, None otherwise. If
287 file mode is changed, 'mode' is a tuple (islink, isexec) where
268 file mode is changed, 'mode' is a tuple (islink, isexec) where
288 'islink' is True if the file is a symlink and 'isexec' is True if
269 'islink' is True if the file is a symlink and 'isexec' is True if
289 the file is executable. Otherwise, 'mode' is None.
270 the file is executable. Otherwise, 'mode' is None.
290 """
271 """
291 def __init__(self, path):
272 def __init__(self, path):
292 self.path = path
273 self.path = path
293 self.oldpath = None
274 self.oldpath = None
294 self.mode = None
275 self.mode = None
295 self.op = 'MODIFY'
276 self.op = 'MODIFY'
296 self.binary = False
277 self.binary = False
297
278
298 def setmode(self, mode):
279 def setmode(self, mode):
299 islink = mode & 020000
280 islink = mode & 020000
300 isexec = mode & 0100
281 isexec = mode & 0100
301 self.mode = (islink, isexec)
282 self.mode = (islink, isexec)
302
283
303 def __repr__(self):
284 def __repr__(self):
304 return "<patchmeta %s %r>" % (self.op, self.path)
285 return "<patchmeta %s %r>" % (self.op, self.path)
305
286
306 def readgitpatch(lr):
287 def readgitpatch(lr):
307 """extract git-style metadata about patches from <patchname>"""
288 """extract git-style metadata about patches from <patchname>"""
308
289
309 # Filter patch for git information
290 # Filter patch for git information
310 gp = None
291 gp = None
311 gitpatches = []
292 gitpatches = []
312 for line in lr:
293 for line in lr:
313 line = line.rstrip(' \r\n')
294 line = line.rstrip(' \r\n')
314 if line.startswith('diff --git'):
295 if line.startswith('diff --git'):
315 m = gitre.match(line)
296 m = gitre.match(line)
316 if m:
297 if m:
317 if gp:
298 if gp:
318 gitpatches.append(gp)
299 gitpatches.append(gp)
319 dst = m.group(2)
300 dst = m.group(2)
320 gp = patchmeta(dst)
301 gp = patchmeta(dst)
321 elif gp:
302 elif gp:
322 if line.startswith('--- '):
303 if line.startswith('--- '):
323 gitpatches.append(gp)
304 gitpatches.append(gp)
324 gp = None
305 gp = None
325 continue
306 continue
326 if line.startswith('rename from '):
307 if line.startswith('rename from '):
327 gp.op = 'RENAME'
308 gp.op = 'RENAME'
328 gp.oldpath = line[12:]
309 gp.oldpath = line[12:]
329 elif line.startswith('rename to '):
310 elif line.startswith('rename to '):
330 gp.path = line[10:]
311 gp.path = line[10:]
331 elif line.startswith('copy from '):
312 elif line.startswith('copy from '):
332 gp.op = 'COPY'
313 gp.op = 'COPY'
333 gp.oldpath = line[10:]
314 gp.oldpath = line[10:]
334 elif line.startswith('copy to '):
315 elif line.startswith('copy to '):
335 gp.path = line[8:]
316 gp.path = line[8:]
336 elif line.startswith('deleted file'):
317 elif line.startswith('deleted file'):
337 gp.op = 'DELETE'
318 gp.op = 'DELETE'
338 elif line.startswith('new file mode '):
319 elif line.startswith('new file mode '):
339 gp.op = 'ADD'
320 gp.op = 'ADD'
340 gp.setmode(int(line[-6:], 8))
321 gp.setmode(int(line[-6:], 8))
341 elif line.startswith('new mode '):
322 elif line.startswith('new mode '):
342 gp.setmode(int(line[-6:], 8))
323 gp.setmode(int(line[-6:], 8))
343 elif line.startswith('GIT binary patch'):
324 elif line.startswith('GIT binary patch'):
344 gp.binary = True
325 gp.binary = True
345 if gp:
326 if gp:
346 gitpatches.append(gp)
327 gitpatches.append(gp)
347
328
348 return gitpatches
329 return gitpatches
349
330
350 class linereader(object):
331 class linereader(object):
351 # simple class to allow pushing lines back into the input stream
332 # simple class to allow pushing lines back into the input stream
352 def __init__(self, fp, textmode=False):
333 def __init__(self, fp, textmode=False):
353 self.fp = fp
334 self.fp = fp
354 self.buf = []
335 self.buf = []
355 self.textmode = textmode
336 self.textmode = textmode
356 self.eol = None
337 self.eol = None
357
338
358 def push(self, line):
339 def push(self, line):
359 if line is not None:
340 if line is not None:
360 self.buf.append(line)
341 self.buf.append(line)
361
342
362 def readline(self):
343 def readline(self):
363 if self.buf:
344 if self.buf:
364 l = self.buf[0]
345 l = self.buf[0]
365 del self.buf[0]
346 del self.buf[0]
366 return l
347 return l
367 l = self.fp.readline()
348 l = self.fp.readline()
368 if not self.eol:
349 if not self.eol:
369 if l.endswith('\r\n'):
350 if l.endswith('\r\n'):
370 self.eol = '\r\n'
351 self.eol = '\r\n'
371 elif l.endswith('\n'):
352 elif l.endswith('\n'):
372 self.eol = '\n'
353 self.eol = '\n'
373 if self.textmode and l.endswith('\r\n'):
354 if self.textmode and l.endswith('\r\n'):
374 l = l[:-2] + '\n'
355 l = l[:-2] + '\n'
375 return l
356 return l
376
357
377 def __iter__(self):
358 def __iter__(self):
378 while 1:
359 while 1:
379 l = self.readline()
360 l = self.readline()
380 if not l:
361 if not l:
381 break
362 break
382 yield l
363 yield l
383
364
384 class abstractbackend(object):
365 class abstractbackend(object):
385 def __init__(self, ui):
366 def __init__(self, ui):
386 self.ui = ui
367 self.ui = ui
387
368
388 def readlines(self, fname):
369 def readlines(self, fname):
389 """Return target file lines, or its content as a single line
370 """Return target file lines, or its content as a single line
390 for symlinks.
371 for symlinks.
391 """
372 """
392 raise NotImplementedError
373 raise NotImplementedError
393
374
394 def writelines(self, fname, lines):
375 def writelines(self, fname, lines):
395 """Write lines to target file."""
376 """Write lines to target file."""
396 raise NotImplementedError
377 raise NotImplementedError
397
378
398 def unlink(self, fname):
379 def unlink(self, fname):
399 """Unlink target file."""
380 """Unlink target file."""
400 raise NotImplementedError
381 raise NotImplementedError
401
382
402 def writerej(self, fname, failed, total, lines):
383 def writerej(self, fname, failed, total, lines):
403 """Write rejected lines for fname. total is the number of hunks
384 """Write rejected lines for fname. total is the number of hunks
404 which failed to apply and total the total number of hunks for this
385 which failed to apply and total the total number of hunks for this
405 files.
386 files.
406 """
387 """
407 pass
388 pass
408
389
390 def copy(self, src, dst):
391 """Copy src file into dst file. Create intermediate directories if
392 necessary. Files are specified relatively to the patching base
393 directory.
394 """
395 raise NotImplementedError
396
409 class fsbackend(abstractbackend):
397 class fsbackend(abstractbackend):
410 def __init__(self, ui, opener):
398 def __init__(self, ui, basedir):
411 super(fsbackend, self).__init__(ui)
399 super(fsbackend, self).__init__(ui)
412 self.opener = opener
400 self.opener = scmutil.opener(basedir)
413
401
414 def readlines(self, fname):
402 def readlines(self, fname):
415 if os.path.islink(fname):
403 if os.path.islink(fname):
416 return [os.readlink(fname)]
404 return [os.readlink(fname)]
417 fp = self.opener(fname, 'r')
405 fp = self.opener(fname, 'r')
418 try:
406 try:
419 return list(fp)
407 return list(fp)
420 finally:
408 finally:
421 fp.close()
409 fp.close()
422
410
423 def writelines(self, fname, lines):
411 def writelines(self, fname, lines):
424 # Ensure supplied data ends in fname, being a regular file or
412 # Ensure supplied data ends in fname, being a regular file or
425 # a symlink. _updatedir will -too magically- take care
413 # a symlink. _updatedir will -too magically- take care
426 # of setting it to the proper type afterwards.
414 # of setting it to the proper type afterwards.
427 st_mode = None
415 st_mode = None
428 islink = os.path.islink(fname)
416 islink = os.path.islink(fname)
429 if islink:
417 if islink:
430 fp = cStringIO.StringIO()
418 fp = cStringIO.StringIO()
431 else:
419 else:
432 try:
420 try:
433 st_mode = os.lstat(fname).st_mode & 0777
421 st_mode = os.lstat(fname).st_mode & 0777
434 except OSError, e:
422 except OSError, e:
435 if e.errno != errno.ENOENT:
423 if e.errno != errno.ENOENT:
436 raise
424 raise
437 fp = self.opener(fname, 'w')
425 fp = self.opener(fname, 'w')
438 try:
426 try:
439 fp.writelines(lines)
427 fp.writelines(lines)
440 if islink:
428 if islink:
441 self.opener.symlink(fp.getvalue(), fname)
429 self.opener.symlink(fp.getvalue(), fname)
442 if st_mode is not None:
430 if st_mode is not None:
443 os.chmod(fname, st_mode)
431 os.chmod(fname, st_mode)
444 finally:
432 finally:
445 fp.close()
433 fp.close()
446
434
447 def unlink(self, fname):
435 def unlink(self, fname):
448 os.unlink(fname)
436 os.unlink(fname)
449
437
450 def writerej(self, fname, failed, total, lines):
438 def writerej(self, fname, failed, total, lines):
451 fname = fname + ".rej"
439 fname = fname + ".rej"
452 self.ui.warn(
440 self.ui.warn(
453 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
441 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
454 (failed, total, fname))
442 (failed, total, fname))
455 fp = self.opener(fname, 'w')
443 fp = self.opener(fname, 'w')
456 fp.writelines(lines)
444 fp.writelines(lines)
457 fp.close()
445 fp.close()
458
446
447 def copy(self, src, dst):
448 basedir = self.opener.base
449 abssrc, absdst = [scmutil.canonpath(basedir, basedir, x)
450 for x in [src, dst]]
451 if os.path.lexists(absdst):
452 raise util.Abort(_("cannot create %s: destination already exists")
453 % dst)
454 dstdir = os.path.dirname(absdst)
455 if dstdir and not os.path.isdir(dstdir):
456 try:
457 os.makedirs(dstdir)
458 except IOError:
459 raise util.Abort(
460 _("cannot create %s: unable to create destination directory")
461 % dst)
462 util.copyfile(abssrc, absdst)
463
459 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
464 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
460 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
465 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
461 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
466 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
462 eolmodes = ['strict', 'crlf', 'lf', 'auto']
467 eolmodes = ['strict', 'crlf', 'lf', 'auto']
463
468
464 class patchfile(object):
469 class patchfile(object):
465 def __init__(self, ui, fname, backend, missing=False, eolmode='strict'):
470 def __init__(self, ui, fname, backend, missing=False, eolmode='strict'):
466 self.fname = fname
471 self.fname = fname
467 self.eolmode = eolmode
472 self.eolmode = eolmode
468 self.eol = None
473 self.eol = None
469 self.backend = backend
474 self.backend = backend
470 self.ui = ui
475 self.ui = ui
471 self.lines = []
476 self.lines = []
472 self.exists = False
477 self.exists = False
473 self.missing = missing
478 self.missing = missing
474 if not missing:
479 if not missing:
475 try:
480 try:
476 self.lines = self.backend.readlines(fname)
481 self.lines = self.backend.readlines(fname)
477 if self.lines:
482 if self.lines:
478 # Normalize line endings
483 # Normalize line endings
479 if self.lines[0].endswith('\r\n'):
484 if self.lines[0].endswith('\r\n'):
480 self.eol = '\r\n'
485 self.eol = '\r\n'
481 elif self.lines[0].endswith('\n'):
486 elif self.lines[0].endswith('\n'):
482 self.eol = '\n'
487 self.eol = '\n'
483 if eolmode != 'strict':
488 if eolmode != 'strict':
484 nlines = []
489 nlines = []
485 for l in self.lines:
490 for l in self.lines:
486 if l.endswith('\r\n'):
491 if l.endswith('\r\n'):
487 l = l[:-2] + '\n'
492 l = l[:-2] + '\n'
488 nlines.append(l)
493 nlines.append(l)
489 self.lines = nlines
494 self.lines = nlines
490 self.exists = True
495 self.exists = True
491 except IOError:
496 except IOError:
492 pass
497 pass
493 else:
498 else:
494 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
499 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
495
500
496 self.hash = {}
501 self.hash = {}
497 self.dirty = 0
502 self.dirty = 0
498 self.offset = 0
503 self.offset = 0
499 self.skew = 0
504 self.skew = 0
500 self.rej = []
505 self.rej = []
501 self.fileprinted = False
506 self.fileprinted = False
502 self.printfile(False)
507 self.printfile(False)
503 self.hunks = 0
508 self.hunks = 0
504
509
505 def writelines(self, fname, lines):
510 def writelines(self, fname, lines):
506 if self.eolmode == 'auto':
511 if self.eolmode == 'auto':
507 eol = self.eol
512 eol = self.eol
508 elif self.eolmode == 'crlf':
513 elif self.eolmode == 'crlf':
509 eol = '\r\n'
514 eol = '\r\n'
510 else:
515 else:
511 eol = '\n'
516 eol = '\n'
512
517
513 if self.eolmode != 'strict' and eol and eol != '\n':
518 if self.eolmode != 'strict' and eol and eol != '\n':
514 rawlines = []
519 rawlines = []
515 for l in lines:
520 for l in lines:
516 if l and l[-1] == '\n':
521 if l and l[-1] == '\n':
517 l = l[:-1] + eol
522 l = l[:-1] + eol
518 rawlines.append(l)
523 rawlines.append(l)
519 lines = rawlines
524 lines = rawlines
520
525
521 self.backend.writelines(fname, lines)
526 self.backend.writelines(fname, lines)
522
527
523 def printfile(self, warn):
528 def printfile(self, warn):
524 if self.fileprinted:
529 if self.fileprinted:
525 return
530 return
526 if warn or self.ui.verbose:
531 if warn or self.ui.verbose:
527 self.fileprinted = True
532 self.fileprinted = True
528 s = _("patching file %s\n") % self.fname
533 s = _("patching file %s\n") % self.fname
529 if warn:
534 if warn:
530 self.ui.warn(s)
535 self.ui.warn(s)
531 else:
536 else:
532 self.ui.note(s)
537 self.ui.note(s)
533
538
534
539
535 def findlines(self, l, linenum):
540 def findlines(self, l, linenum):
536 # looks through the hash and finds candidate lines. The
541 # looks through the hash and finds candidate lines. The
537 # result is a list of line numbers sorted based on distance
542 # result is a list of line numbers sorted based on distance
538 # from linenum
543 # from linenum
539
544
540 cand = self.hash.get(l, [])
545 cand = self.hash.get(l, [])
541 if len(cand) > 1:
546 if len(cand) > 1:
542 # resort our list of potentials forward then back.
547 # resort our list of potentials forward then back.
543 cand.sort(key=lambda x: abs(x - linenum))
548 cand.sort(key=lambda x: abs(x - linenum))
544 return cand
549 return cand
545
550
546 def write_rej(self):
551 def write_rej(self):
547 # our rejects are a little different from patch(1). This always
552 # our rejects are a little different from patch(1). This always
548 # creates rejects in the same form as the original patch. A file
553 # creates rejects in the same form as the original patch. A file
549 # header is inserted so that you can run the reject through patch again
554 # header is inserted so that you can run the reject through patch again
550 # without having to type the filename.
555 # without having to type the filename.
551 if not self.rej:
556 if not self.rej:
552 return
557 return
553 base = os.path.basename(self.fname)
558 base = os.path.basename(self.fname)
554 lines = ["--- %s\n+++ %s\n" % (base, base)]
559 lines = ["--- %s\n+++ %s\n" % (base, base)]
555 for x in self.rej:
560 for x in self.rej:
556 for l in x.hunk:
561 for l in x.hunk:
557 lines.append(l)
562 lines.append(l)
558 if l[-1] != '\n':
563 if l[-1] != '\n':
559 lines.append("\n\ No newline at end of file\n")
564 lines.append("\n\ No newline at end of file\n")
560 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
565 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
561
566
562 def apply(self, h):
567 def apply(self, h):
563 if not h.complete():
568 if not h.complete():
564 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
569 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
565 (h.number, h.desc, len(h.a), h.lena, len(h.b),
570 (h.number, h.desc, len(h.a), h.lena, len(h.b),
566 h.lenb))
571 h.lenb))
567
572
568 self.hunks += 1
573 self.hunks += 1
569
574
570 if self.missing:
575 if self.missing:
571 self.rej.append(h)
576 self.rej.append(h)
572 return -1
577 return -1
573
578
574 if self.exists and h.createfile():
579 if self.exists and h.createfile():
575 self.ui.warn(_("file %s already exists\n") % self.fname)
580 self.ui.warn(_("file %s already exists\n") % self.fname)
576 self.rej.append(h)
581 self.rej.append(h)
577 return -1
582 return -1
578
583
579 if isinstance(h, binhunk):
584 if isinstance(h, binhunk):
580 if h.rmfile():
585 if h.rmfile():
581 self.backend.unlink(self.fname)
586 self.backend.unlink(self.fname)
582 else:
587 else:
583 self.lines[:] = h.new()
588 self.lines[:] = h.new()
584 self.offset += len(h.new())
589 self.offset += len(h.new())
585 self.dirty = True
590 self.dirty = True
586 return 0
591 return 0
587
592
588 horig = h
593 horig = h
589 if (self.eolmode in ('crlf', 'lf')
594 if (self.eolmode in ('crlf', 'lf')
590 or self.eolmode == 'auto' and self.eol):
595 or self.eolmode == 'auto' and self.eol):
591 # If new eols are going to be normalized, then normalize
596 # If new eols are going to be normalized, then normalize
592 # hunk data before patching. Otherwise, preserve input
597 # hunk data before patching. Otherwise, preserve input
593 # line-endings.
598 # line-endings.
594 h = h.getnormalized()
599 h = h.getnormalized()
595
600
596 # fast case first, no offsets, no fuzz
601 # fast case first, no offsets, no fuzz
597 old = h.old()
602 old = h.old()
598 # patch starts counting at 1 unless we are adding the file
603 # patch starts counting at 1 unless we are adding the file
599 if h.starta == 0:
604 if h.starta == 0:
600 start = 0
605 start = 0
601 else:
606 else:
602 start = h.starta + self.offset - 1
607 start = h.starta + self.offset - 1
603 orig_start = start
608 orig_start = start
604 # if there's skew we want to emit the "(offset %d lines)" even
609 # if there's skew we want to emit the "(offset %d lines)" even
605 # when the hunk cleanly applies at start + skew, so skip the
610 # when the hunk cleanly applies at start + skew, so skip the
606 # fast case code
611 # fast case code
607 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
612 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
608 if h.rmfile():
613 if h.rmfile():
609 self.backend.unlink(self.fname)
614 self.backend.unlink(self.fname)
610 else:
615 else:
611 self.lines[start : start + h.lena] = h.new()
616 self.lines[start : start + h.lena] = h.new()
612 self.offset += h.lenb - h.lena
617 self.offset += h.lenb - h.lena
613 self.dirty = True
618 self.dirty = True
614 return 0
619 return 0
615
620
616 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
621 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
617 self.hash = {}
622 self.hash = {}
618 for x, s in enumerate(self.lines):
623 for x, s in enumerate(self.lines):
619 self.hash.setdefault(s, []).append(x)
624 self.hash.setdefault(s, []).append(x)
620 if h.hunk[-1][0] != ' ':
625 if h.hunk[-1][0] != ' ':
621 # if the hunk tried to put something at the bottom of the file
626 # if the hunk tried to put something at the bottom of the file
622 # override the start line and use eof here
627 # override the start line and use eof here
623 search_start = len(self.lines)
628 search_start = len(self.lines)
624 else:
629 else:
625 search_start = orig_start + self.skew
630 search_start = orig_start + self.skew
626
631
627 for fuzzlen in xrange(3):
632 for fuzzlen in xrange(3):
628 for toponly in [True, False]:
633 for toponly in [True, False]:
629 old = h.old(fuzzlen, toponly)
634 old = h.old(fuzzlen, toponly)
630
635
631 cand = self.findlines(old[0][1:], search_start)
636 cand = self.findlines(old[0][1:], search_start)
632 for l in cand:
637 for l in cand:
633 if diffhelpers.testhunk(old, self.lines, l) == 0:
638 if diffhelpers.testhunk(old, self.lines, l) == 0:
634 newlines = h.new(fuzzlen, toponly)
639 newlines = h.new(fuzzlen, toponly)
635 self.lines[l : l + len(old)] = newlines
640 self.lines[l : l + len(old)] = newlines
636 self.offset += len(newlines) - len(old)
641 self.offset += len(newlines) - len(old)
637 self.skew = l - orig_start
642 self.skew = l - orig_start
638 self.dirty = True
643 self.dirty = True
639 offset = l - orig_start - fuzzlen
644 offset = l - orig_start - fuzzlen
640 if fuzzlen:
645 if fuzzlen:
641 msg = _("Hunk #%d succeeded at %d "
646 msg = _("Hunk #%d succeeded at %d "
642 "with fuzz %d "
647 "with fuzz %d "
643 "(offset %d lines).\n")
648 "(offset %d lines).\n")
644 self.printfile(True)
649 self.printfile(True)
645 self.ui.warn(msg %
650 self.ui.warn(msg %
646 (h.number, l + 1, fuzzlen, offset))
651 (h.number, l + 1, fuzzlen, offset))
647 else:
652 else:
648 msg = _("Hunk #%d succeeded at %d "
653 msg = _("Hunk #%d succeeded at %d "
649 "(offset %d lines).\n")
654 "(offset %d lines).\n")
650 self.ui.note(msg % (h.number, l + 1, offset))
655 self.ui.note(msg % (h.number, l + 1, offset))
651 return fuzzlen
656 return fuzzlen
652 self.printfile(True)
657 self.printfile(True)
653 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
658 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
654 self.rej.append(horig)
659 self.rej.append(horig)
655 return -1
660 return -1
656
661
657 def close(self):
662 def close(self):
658 if self.dirty:
663 if self.dirty:
659 self.writelines(self.fname, self.lines)
664 self.writelines(self.fname, self.lines)
660 self.write_rej()
665 self.write_rej()
661 return len(self.rej)
666 return len(self.rej)
662
667
663 class hunk(object):
668 class hunk(object):
664 def __init__(self, desc, num, lr, context, create=False, remove=False):
669 def __init__(self, desc, num, lr, context, create=False, remove=False):
665 self.number = num
670 self.number = num
666 self.desc = desc
671 self.desc = desc
667 self.hunk = [desc]
672 self.hunk = [desc]
668 self.a = []
673 self.a = []
669 self.b = []
674 self.b = []
670 self.starta = self.lena = None
675 self.starta = self.lena = None
671 self.startb = self.lenb = None
676 self.startb = self.lenb = None
672 if lr is not None:
677 if lr is not None:
673 if context:
678 if context:
674 self.read_context_hunk(lr)
679 self.read_context_hunk(lr)
675 else:
680 else:
676 self.read_unified_hunk(lr)
681 self.read_unified_hunk(lr)
677 self.create = create
682 self.create = create
678 self.remove = remove and not create
683 self.remove = remove and not create
679
684
680 def getnormalized(self):
685 def getnormalized(self):
681 """Return a copy with line endings normalized to LF."""
686 """Return a copy with line endings normalized to LF."""
682
687
683 def normalize(lines):
688 def normalize(lines):
684 nlines = []
689 nlines = []
685 for line in lines:
690 for line in lines:
686 if line.endswith('\r\n'):
691 if line.endswith('\r\n'):
687 line = line[:-2] + '\n'
692 line = line[:-2] + '\n'
688 nlines.append(line)
693 nlines.append(line)
689 return nlines
694 return nlines
690
695
691 # Dummy object, it is rebuilt manually
696 # Dummy object, it is rebuilt manually
692 nh = hunk(self.desc, self.number, None, None, False, False)
697 nh = hunk(self.desc, self.number, None, None, False, False)
693 nh.number = self.number
698 nh.number = self.number
694 nh.desc = self.desc
699 nh.desc = self.desc
695 nh.hunk = self.hunk
700 nh.hunk = self.hunk
696 nh.a = normalize(self.a)
701 nh.a = normalize(self.a)
697 nh.b = normalize(self.b)
702 nh.b = normalize(self.b)
698 nh.starta = self.starta
703 nh.starta = self.starta
699 nh.startb = self.startb
704 nh.startb = self.startb
700 nh.lena = self.lena
705 nh.lena = self.lena
701 nh.lenb = self.lenb
706 nh.lenb = self.lenb
702 nh.create = self.create
707 nh.create = self.create
703 nh.remove = self.remove
708 nh.remove = self.remove
704 return nh
709 return nh
705
710
706 def read_unified_hunk(self, lr):
711 def read_unified_hunk(self, lr):
707 m = unidesc.match(self.desc)
712 m = unidesc.match(self.desc)
708 if not m:
713 if not m:
709 raise PatchError(_("bad hunk #%d") % self.number)
714 raise PatchError(_("bad hunk #%d") % self.number)
710 self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
715 self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
711 if self.lena is None:
716 if self.lena is None:
712 self.lena = 1
717 self.lena = 1
713 else:
718 else:
714 self.lena = int(self.lena)
719 self.lena = int(self.lena)
715 if self.lenb is None:
720 if self.lenb is None:
716 self.lenb = 1
721 self.lenb = 1
717 else:
722 else:
718 self.lenb = int(self.lenb)
723 self.lenb = int(self.lenb)
719 self.starta = int(self.starta)
724 self.starta = int(self.starta)
720 self.startb = int(self.startb)
725 self.startb = int(self.startb)
721 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
726 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
722 # if we hit eof before finishing out the hunk, the last line will
727 # if we hit eof before finishing out the hunk, the last line will
723 # be zero length. Lets try to fix it up.
728 # be zero length. Lets try to fix it up.
724 while len(self.hunk[-1]) == 0:
729 while len(self.hunk[-1]) == 0:
725 del self.hunk[-1]
730 del self.hunk[-1]
726 del self.a[-1]
731 del self.a[-1]
727 del self.b[-1]
732 del self.b[-1]
728 self.lena -= 1
733 self.lena -= 1
729 self.lenb -= 1
734 self.lenb -= 1
730 self._fixnewline(lr)
735 self._fixnewline(lr)
731
736
732 def read_context_hunk(self, lr):
737 def read_context_hunk(self, lr):
733 self.desc = lr.readline()
738 self.desc = lr.readline()
734 m = contextdesc.match(self.desc)
739 m = contextdesc.match(self.desc)
735 if not m:
740 if not m:
736 raise PatchError(_("bad hunk #%d") % self.number)
741 raise PatchError(_("bad hunk #%d") % self.number)
737 foo, self.starta, foo2, aend, foo3 = m.groups()
742 foo, self.starta, foo2, aend, foo3 = m.groups()
738 self.starta = int(self.starta)
743 self.starta = int(self.starta)
739 if aend is None:
744 if aend is None:
740 aend = self.starta
745 aend = self.starta
741 self.lena = int(aend) - self.starta
746 self.lena = int(aend) - self.starta
742 if self.starta:
747 if self.starta:
743 self.lena += 1
748 self.lena += 1
744 for x in xrange(self.lena):
749 for x in xrange(self.lena):
745 l = lr.readline()
750 l = lr.readline()
746 if l.startswith('---'):
751 if l.startswith('---'):
747 # lines addition, old block is empty
752 # lines addition, old block is empty
748 lr.push(l)
753 lr.push(l)
749 break
754 break
750 s = l[2:]
755 s = l[2:]
751 if l.startswith('- ') or l.startswith('! '):
756 if l.startswith('- ') or l.startswith('! '):
752 u = '-' + s
757 u = '-' + s
753 elif l.startswith(' '):
758 elif l.startswith(' '):
754 u = ' ' + s
759 u = ' ' + s
755 else:
760 else:
756 raise PatchError(_("bad hunk #%d old text line %d") %
761 raise PatchError(_("bad hunk #%d old text line %d") %
757 (self.number, x))
762 (self.number, x))
758 self.a.append(u)
763 self.a.append(u)
759 self.hunk.append(u)
764 self.hunk.append(u)
760
765
761 l = lr.readline()
766 l = lr.readline()
762 if l.startswith('\ '):
767 if l.startswith('\ '):
763 s = self.a[-1][:-1]
768 s = self.a[-1][:-1]
764 self.a[-1] = s
769 self.a[-1] = s
765 self.hunk[-1] = s
770 self.hunk[-1] = s
766 l = lr.readline()
771 l = lr.readline()
767 m = contextdesc.match(l)
772 m = contextdesc.match(l)
768 if not m:
773 if not m:
769 raise PatchError(_("bad hunk #%d") % self.number)
774 raise PatchError(_("bad hunk #%d") % self.number)
770 foo, self.startb, foo2, bend, foo3 = m.groups()
775 foo, self.startb, foo2, bend, foo3 = m.groups()
771 self.startb = int(self.startb)
776 self.startb = int(self.startb)
772 if bend is None:
777 if bend is None:
773 bend = self.startb
778 bend = self.startb
774 self.lenb = int(bend) - self.startb
779 self.lenb = int(bend) - self.startb
775 if self.startb:
780 if self.startb:
776 self.lenb += 1
781 self.lenb += 1
777 hunki = 1
782 hunki = 1
778 for x in xrange(self.lenb):
783 for x in xrange(self.lenb):
779 l = lr.readline()
784 l = lr.readline()
780 if l.startswith('\ '):
785 if l.startswith('\ '):
781 # XXX: the only way to hit this is with an invalid line range.
786 # XXX: the only way to hit this is with an invalid line range.
782 # The no-eol marker is not counted in the line range, but I
787 # The no-eol marker is not counted in the line range, but I
783 # guess there are diff(1) out there which behave differently.
788 # guess there are diff(1) out there which behave differently.
784 s = self.b[-1][:-1]
789 s = self.b[-1][:-1]
785 self.b[-1] = s
790 self.b[-1] = s
786 self.hunk[hunki - 1] = s
791 self.hunk[hunki - 1] = s
787 continue
792 continue
788 if not l:
793 if not l:
789 # line deletions, new block is empty and we hit EOF
794 # line deletions, new block is empty and we hit EOF
790 lr.push(l)
795 lr.push(l)
791 break
796 break
792 s = l[2:]
797 s = l[2:]
793 if l.startswith('+ ') or l.startswith('! '):
798 if l.startswith('+ ') or l.startswith('! '):
794 u = '+' + s
799 u = '+' + s
795 elif l.startswith(' '):
800 elif l.startswith(' '):
796 u = ' ' + s
801 u = ' ' + s
797 elif len(self.b) == 0:
802 elif len(self.b) == 0:
798 # line deletions, new block is empty
803 # line deletions, new block is empty
799 lr.push(l)
804 lr.push(l)
800 break
805 break
801 else:
806 else:
802 raise PatchError(_("bad hunk #%d old text line %d") %
807 raise PatchError(_("bad hunk #%d old text line %d") %
803 (self.number, x))
808 (self.number, x))
804 self.b.append(s)
809 self.b.append(s)
805 while True:
810 while True:
806 if hunki >= len(self.hunk):
811 if hunki >= len(self.hunk):
807 h = ""
812 h = ""
808 else:
813 else:
809 h = self.hunk[hunki]
814 h = self.hunk[hunki]
810 hunki += 1
815 hunki += 1
811 if h == u:
816 if h == u:
812 break
817 break
813 elif h.startswith('-'):
818 elif h.startswith('-'):
814 continue
819 continue
815 else:
820 else:
816 self.hunk.insert(hunki - 1, u)
821 self.hunk.insert(hunki - 1, u)
817 break
822 break
818
823
819 if not self.a:
824 if not self.a:
820 # this happens when lines were only added to the hunk
825 # this happens when lines were only added to the hunk
821 for x in self.hunk:
826 for x in self.hunk:
822 if x.startswith('-') or x.startswith(' '):
827 if x.startswith('-') or x.startswith(' '):
823 self.a.append(x)
828 self.a.append(x)
824 if not self.b:
829 if not self.b:
825 # this happens when lines were only deleted from the hunk
830 # this happens when lines were only deleted from the hunk
826 for x in self.hunk:
831 for x in self.hunk:
827 if x.startswith('+') or x.startswith(' '):
832 if x.startswith('+') or x.startswith(' '):
828 self.b.append(x[1:])
833 self.b.append(x[1:])
829 # @@ -start,len +start,len @@
834 # @@ -start,len +start,len @@
830 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
835 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
831 self.startb, self.lenb)
836 self.startb, self.lenb)
832 self.hunk[0] = self.desc
837 self.hunk[0] = self.desc
833 self._fixnewline(lr)
838 self._fixnewline(lr)
834
839
835 def _fixnewline(self, lr):
840 def _fixnewline(self, lr):
836 l = lr.readline()
841 l = lr.readline()
837 if l.startswith('\ '):
842 if l.startswith('\ '):
838 diffhelpers.fix_newline(self.hunk, self.a, self.b)
843 diffhelpers.fix_newline(self.hunk, self.a, self.b)
839 else:
844 else:
840 lr.push(l)
845 lr.push(l)
841
846
842 def complete(self):
847 def complete(self):
843 return len(self.a) == self.lena and len(self.b) == self.lenb
848 return len(self.a) == self.lena and len(self.b) == self.lenb
844
849
845 def createfile(self):
850 def createfile(self):
846 return self.starta == 0 and self.lena == 0 and self.create
851 return self.starta == 0 and self.lena == 0 and self.create
847
852
848 def rmfile(self):
853 def rmfile(self):
849 return self.startb == 0 and self.lenb == 0 and self.remove
854 return self.startb == 0 and self.lenb == 0 and self.remove
850
855
851 def fuzzit(self, l, fuzz, toponly):
856 def fuzzit(self, l, fuzz, toponly):
852 # this removes context lines from the top and bottom of list 'l'. It
857 # this removes context lines from the top and bottom of list 'l'. It
853 # checks the hunk to make sure only context lines are removed, and then
858 # checks the hunk to make sure only context lines are removed, and then
854 # returns a new shortened list of lines.
859 # returns a new shortened list of lines.
855 fuzz = min(fuzz, len(l)-1)
860 fuzz = min(fuzz, len(l)-1)
856 if fuzz:
861 if fuzz:
857 top = 0
862 top = 0
858 bot = 0
863 bot = 0
859 hlen = len(self.hunk)
864 hlen = len(self.hunk)
860 for x in xrange(hlen - 1):
865 for x in xrange(hlen - 1):
861 # the hunk starts with the @@ line, so use x+1
866 # the hunk starts with the @@ line, so use x+1
862 if self.hunk[x + 1][0] == ' ':
867 if self.hunk[x + 1][0] == ' ':
863 top += 1
868 top += 1
864 else:
869 else:
865 break
870 break
866 if not toponly:
871 if not toponly:
867 for x in xrange(hlen - 1):
872 for x in xrange(hlen - 1):
868 if self.hunk[hlen - bot - 1][0] == ' ':
873 if self.hunk[hlen - bot - 1][0] == ' ':
869 bot += 1
874 bot += 1
870 else:
875 else:
871 break
876 break
872
877
873 # top and bot now count context in the hunk
878 # top and bot now count context in the hunk
874 # adjust them if either one is short
879 # adjust them if either one is short
875 context = max(top, bot, 3)
880 context = max(top, bot, 3)
876 if bot < context:
881 if bot < context:
877 bot = max(0, fuzz - (context - bot))
882 bot = max(0, fuzz - (context - bot))
878 else:
883 else:
879 bot = min(fuzz, bot)
884 bot = min(fuzz, bot)
880 if top < context:
885 if top < context:
881 top = max(0, fuzz - (context - top))
886 top = max(0, fuzz - (context - top))
882 else:
887 else:
883 top = min(fuzz, top)
888 top = min(fuzz, top)
884
889
885 return l[top:len(l)-bot]
890 return l[top:len(l)-bot]
886 return l
891 return l
887
892
888 def old(self, fuzz=0, toponly=False):
893 def old(self, fuzz=0, toponly=False):
889 return self.fuzzit(self.a, fuzz, toponly)
894 return self.fuzzit(self.a, fuzz, toponly)
890
895
891 def new(self, fuzz=0, toponly=False):
896 def new(self, fuzz=0, toponly=False):
892 return self.fuzzit(self.b, fuzz, toponly)
897 return self.fuzzit(self.b, fuzz, toponly)
893
898
894 class binhunk:
899 class binhunk:
895 'A binary patch file. Only understands literals so far.'
900 'A binary patch file. Only understands literals so far.'
896 def __init__(self, gitpatch):
901 def __init__(self, gitpatch):
897 self.gitpatch = gitpatch
902 self.gitpatch = gitpatch
898 self.text = None
903 self.text = None
899 self.hunk = ['GIT binary patch\n']
904 self.hunk = ['GIT binary patch\n']
900
905
901 def createfile(self):
906 def createfile(self):
902 return self.gitpatch.op in ('ADD', 'RENAME', 'COPY')
907 return self.gitpatch.op in ('ADD', 'RENAME', 'COPY')
903
908
904 def rmfile(self):
909 def rmfile(self):
905 return self.gitpatch.op == 'DELETE'
910 return self.gitpatch.op == 'DELETE'
906
911
907 def complete(self):
912 def complete(self):
908 return self.text is not None
913 return self.text is not None
909
914
910 def new(self):
915 def new(self):
911 return [self.text]
916 return [self.text]
912
917
913 def extract(self, lr):
918 def extract(self, lr):
914 line = lr.readline()
919 line = lr.readline()
915 self.hunk.append(line)
920 self.hunk.append(line)
916 while line and not line.startswith('literal '):
921 while line and not line.startswith('literal '):
917 line = lr.readline()
922 line = lr.readline()
918 self.hunk.append(line)
923 self.hunk.append(line)
919 if not line:
924 if not line:
920 raise PatchError(_('could not extract binary patch'))
925 raise PatchError(_('could not extract binary patch'))
921 size = int(line[8:].rstrip())
926 size = int(line[8:].rstrip())
922 dec = []
927 dec = []
923 line = lr.readline()
928 line = lr.readline()
924 self.hunk.append(line)
929 self.hunk.append(line)
925 while len(line) > 1:
930 while len(line) > 1:
926 l = line[0]
931 l = line[0]
927 if l <= 'Z' and l >= 'A':
932 if l <= 'Z' and l >= 'A':
928 l = ord(l) - ord('A') + 1
933 l = ord(l) - ord('A') + 1
929 else:
934 else:
930 l = ord(l) - ord('a') + 27
935 l = ord(l) - ord('a') + 27
931 dec.append(base85.b85decode(line[1:-1])[:l])
936 dec.append(base85.b85decode(line[1:-1])[:l])
932 line = lr.readline()
937 line = lr.readline()
933 self.hunk.append(line)
938 self.hunk.append(line)
934 text = zlib.decompress(''.join(dec))
939 text = zlib.decompress(''.join(dec))
935 if len(text) != size:
940 if len(text) != size:
936 raise PatchError(_('binary patch is %d bytes, not %d') %
941 raise PatchError(_('binary patch is %d bytes, not %d') %
937 (len(text), size))
942 (len(text), size))
938 self.text = text
943 self.text = text
939
944
940 def parsefilename(str):
945 def parsefilename(str):
941 # --- filename \t|space stuff
946 # --- filename \t|space stuff
942 s = str[4:].rstrip('\r\n')
947 s = str[4:].rstrip('\r\n')
943 i = s.find('\t')
948 i = s.find('\t')
944 if i < 0:
949 if i < 0:
945 i = s.find(' ')
950 i = s.find(' ')
946 if i < 0:
951 if i < 0:
947 return s
952 return s
948 return s[:i]
953 return s[:i]
949
954
950 def pathstrip(path, strip):
955 def pathstrip(path, strip):
951 pathlen = len(path)
956 pathlen = len(path)
952 i = 0
957 i = 0
953 if strip == 0:
958 if strip == 0:
954 return '', path.rstrip()
959 return '', path.rstrip()
955 count = strip
960 count = strip
956 while count > 0:
961 while count > 0:
957 i = path.find('/', i)
962 i = path.find('/', i)
958 if i == -1:
963 if i == -1:
959 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
964 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
960 (count, strip, path))
965 (count, strip, path))
961 i += 1
966 i += 1
962 # consume '//' in the path
967 # consume '//' in the path
963 while i < pathlen - 1 and path[i] == '/':
968 while i < pathlen - 1 and path[i] == '/':
964 i += 1
969 i += 1
965 count -= 1
970 count -= 1
966 return path[:i].lstrip(), path[i:].rstrip()
971 return path[:i].lstrip(), path[i:].rstrip()
967
972
968 def selectfile(afile_orig, bfile_orig, hunk, strip):
973 def selectfile(afile_orig, bfile_orig, hunk, strip):
969 nulla = afile_orig == "/dev/null"
974 nulla = afile_orig == "/dev/null"
970 nullb = bfile_orig == "/dev/null"
975 nullb = bfile_orig == "/dev/null"
971 abase, afile = pathstrip(afile_orig, strip)
976 abase, afile = pathstrip(afile_orig, strip)
972 gooda = not nulla and os.path.lexists(afile)
977 gooda = not nulla and os.path.lexists(afile)
973 bbase, bfile = pathstrip(bfile_orig, strip)
978 bbase, bfile = pathstrip(bfile_orig, strip)
974 if afile == bfile:
979 if afile == bfile:
975 goodb = gooda
980 goodb = gooda
976 else:
981 else:
977 goodb = not nullb and os.path.lexists(bfile)
982 goodb = not nullb and os.path.lexists(bfile)
978 createfunc = hunk.createfile
983 createfunc = hunk.createfile
979 missing = not goodb and not gooda and not createfunc()
984 missing = not goodb and not gooda and not createfunc()
980
985
981 # some diff programs apparently produce patches where the afile is
986 # some diff programs apparently produce patches where the afile is
982 # not /dev/null, but afile starts with bfile
987 # not /dev/null, but afile starts with bfile
983 abasedir = afile[:afile.rfind('/') + 1]
988 abasedir = afile[:afile.rfind('/') + 1]
984 bbasedir = bfile[:bfile.rfind('/') + 1]
989 bbasedir = bfile[:bfile.rfind('/') + 1]
985 if missing and abasedir == bbasedir and afile.startswith(bfile):
990 if missing and abasedir == bbasedir and afile.startswith(bfile):
986 # this isn't very pretty
991 # this isn't very pretty
987 hunk.create = True
992 hunk.create = True
988 if createfunc():
993 if createfunc():
989 missing = False
994 missing = False
990 else:
995 else:
991 hunk.create = False
996 hunk.create = False
992
997
993 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
998 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
994 # diff is between a file and its backup. In this case, the original
999 # diff is between a file and its backup. In this case, the original
995 # file should be patched (see original mpatch code).
1000 # file should be patched (see original mpatch code).
996 isbackup = (abase == bbase and bfile.startswith(afile))
1001 isbackup = (abase == bbase and bfile.startswith(afile))
997 fname = None
1002 fname = None
998 if not missing:
1003 if not missing:
999 if gooda and goodb:
1004 if gooda and goodb:
1000 fname = isbackup and afile or bfile
1005 fname = isbackup and afile or bfile
1001 elif gooda:
1006 elif gooda:
1002 fname = afile
1007 fname = afile
1003
1008
1004 if not fname:
1009 if not fname:
1005 if not nullb:
1010 if not nullb:
1006 fname = isbackup and afile or bfile
1011 fname = isbackup and afile or bfile
1007 elif not nulla:
1012 elif not nulla:
1008 fname = afile
1013 fname = afile
1009 else:
1014 else:
1010 raise PatchError(_("undefined source and destination files"))
1015 raise PatchError(_("undefined source and destination files"))
1011
1016
1012 return fname, missing
1017 return fname, missing
1013
1018
1014 def scangitpatch(lr, firstline):
1019 def scangitpatch(lr, firstline):
1015 """
1020 """
1016 Git patches can emit:
1021 Git patches can emit:
1017 - rename a to b
1022 - rename a to b
1018 - change b
1023 - change b
1019 - copy a to c
1024 - copy a to c
1020 - change c
1025 - change c
1021
1026
1022 We cannot apply this sequence as-is, the renamed 'a' could not be
1027 We cannot apply this sequence as-is, the renamed 'a' could not be
1023 found for it would have been renamed already. And we cannot copy
1028 found for it would have been renamed already. And we cannot copy
1024 from 'b' instead because 'b' would have been changed already. So
1029 from 'b' instead because 'b' would have been changed already. So
1025 we scan the git patch for copy and rename commands so we can
1030 we scan the git patch for copy and rename commands so we can
1026 perform the copies ahead of time.
1031 perform the copies ahead of time.
1027 """
1032 """
1028 pos = 0
1033 pos = 0
1029 try:
1034 try:
1030 pos = lr.fp.tell()
1035 pos = lr.fp.tell()
1031 fp = lr.fp
1036 fp = lr.fp
1032 except IOError:
1037 except IOError:
1033 fp = cStringIO.StringIO(lr.fp.read())
1038 fp = cStringIO.StringIO(lr.fp.read())
1034 gitlr = linereader(fp, lr.textmode)
1039 gitlr = linereader(fp, lr.textmode)
1035 gitlr.push(firstline)
1040 gitlr.push(firstline)
1036 gitpatches = readgitpatch(gitlr)
1041 gitpatches = readgitpatch(gitlr)
1037 fp.seek(pos)
1042 fp.seek(pos)
1038 return gitpatches
1043 return gitpatches
1039
1044
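A small, self-contained sketch (plain os/shutil calls on a throwaway temporary directory, no Mercurial API) of the ordering problem the docstring above describes: if the copy of 'a' is performed first, from the still-unmodified source, the rename and the per-file edits can then be applied in patch order without ever looking for a file that is already gone, which is exactly what pre-scanning the git patch for copy/rename records makes possible.

    import os, shutil, tempfile

    d = tempfile.mkdtemp()
    with open(os.path.join(d, 'a'), 'w') as f:
        f.write('v1\n')

    # do the copy first, from the original, unmodified source ...
    shutil.copyfile(os.path.join(d, 'a'), os.path.join(d, 'c'))
    # ... then the rename and the edits can follow in stream order
    os.rename(os.path.join(d, 'a'), os.path.join(d, 'b'))
    with open(os.path.join(d, 'b'), 'a') as f:
        f.write('change to b\n')
    with open(os.path.join(d, 'c'), 'a') as f:
        f.write('change to c\n')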
1040 def iterhunks(fp):
1045 def iterhunks(fp):
1041 """Read a patch and yield the following events:
1046 """Read a patch and yield the following events:
1042 - ("file", afile, bfile, firsthunk): select a new target file.
1047 - ("file", afile, bfile, firsthunk): select a new target file.
1043 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1048 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1044 "file" event.
1049 "file" event.
1045 - ("git", gitchanges): current diff is in git format, gitchanges
1050 - ("git", gitchanges): current diff is in git format, gitchanges
1046 maps filenames to gitpatch records. Unique event.
1051 maps filenames to gitpatch records. Unique event.
1047 """
1052 """
1048 changed = {}
1053 changed = {}
1049 afile = ""
1054 afile = ""
1050 bfile = ""
1055 bfile = ""
1051 state = None
1056 state = None
1052 hunknum = 0
1057 hunknum = 0
1053 emitfile = newfile = False
1058 emitfile = newfile = False
1054 git = False
1059 git = False
1055
1060
1056 # our states
1061 # our states
1057 BFILE = 1
1062 BFILE = 1
1058 context = None
1063 context = None
1059 lr = linereader(fp)
1064 lr = linereader(fp)
1060
1065
1061 while True:
1066 while True:
1062 x = lr.readline()
1067 x = lr.readline()
1063 if not x:
1068 if not x:
1064 break
1069 break
1065 if (state == BFILE and ((not context and x[0] == '@') or
1070 if (state == BFILE and ((not context and x[0] == '@') or
1066 ((context is not False) and x.startswith('***************')))):
1071 ((context is not False) and x.startswith('***************')))):
1067 if context is None and x.startswith('***************'):
1072 if context is None and x.startswith('***************'):
1068 context = True
1073 context = True
1069 gpatch = changed.get(bfile)
1074 gpatch = changed.get(bfile)
1070 create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD'
1075 create = afile == '/dev/null' or gpatch and gpatch.op == 'ADD'
1071 remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE'
1076 remove = bfile == '/dev/null' or gpatch and gpatch.op == 'DELETE'
1072 h = hunk(x, hunknum + 1, lr, context, create, remove)
1077 h = hunk(x, hunknum + 1, lr, context, create, remove)
1073 hunknum += 1
1078 hunknum += 1
1074 if emitfile:
1079 if emitfile:
1075 emitfile = False
1080 emitfile = False
1076 yield 'file', (afile, bfile, h)
1081 yield 'file', (afile, bfile, h)
1077 yield 'hunk', h
1082 yield 'hunk', h
1078 elif state == BFILE and x.startswith('GIT binary patch'):
1083 elif state == BFILE and x.startswith('GIT binary patch'):
1079 h = binhunk(changed[bfile])
1084 h = binhunk(changed[bfile])
1080 hunknum += 1
1085 hunknum += 1
1081 if emitfile:
1086 if emitfile:
1082 emitfile = False
1087 emitfile = False
1083 yield 'file', ('a/' + afile, 'b/' + bfile, h)
1088 yield 'file', ('a/' + afile, 'b/' + bfile, h)
1084 h.extract(lr)
1089 h.extract(lr)
1085 yield 'hunk', h
1090 yield 'hunk', h
1086 elif x.startswith('diff --git'):
1091 elif x.startswith('diff --git'):
1087 # check for git diff, scanning the whole patch file if needed
1092 # check for git diff, scanning the whole patch file if needed
1088 m = gitre.match(x)
1093 m = gitre.match(x)
1089 if m:
1094 if m:
1090 afile, bfile = m.group(1, 2)
1095 afile, bfile = m.group(1, 2)
1091 if not git:
1096 if not git:
1092 git = True
1097 git = True
1093 gitpatches = scangitpatch(lr, x)
1098 gitpatches = scangitpatch(lr, x)
1094 yield 'git', gitpatches
1099 yield 'git', gitpatches
1095 for gp in gitpatches:
1100 for gp in gitpatches:
1096 changed[gp.path] = gp
1101 changed[gp.path] = gp
1097 # else error?
1102 # else error?
1098 # copy/rename + modify should modify target, not source
1103 # copy/rename + modify should modify target, not source
1099 gp = changed.get(bfile)
1104 gp = changed.get(bfile)
1100 if gp and (gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD')
1105 if gp and (gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD')
1101 or gp.mode):
1106 or gp.mode):
1102 afile = bfile
1107 afile = bfile
1103 newfile = True
1108 newfile = True
1104 elif x.startswith('---'):
1109 elif x.startswith('---'):
1105 # check for a unified diff
1110 # check for a unified diff
1106 l2 = lr.readline()
1111 l2 = lr.readline()
1107 if not l2.startswith('+++'):
1112 if not l2.startswith('+++'):
1108 lr.push(l2)
1113 lr.push(l2)
1109 continue
1114 continue
1110 newfile = True
1115 newfile = True
1111 context = False
1116 context = False
1112 afile = parsefilename(x)
1117 afile = parsefilename(x)
1113 bfile = parsefilename(l2)
1118 bfile = parsefilename(l2)
1114 elif x.startswith('***'):
1119 elif x.startswith('***'):
1115 # check for a context diff
1120 # check for a context diff
1116 l2 = lr.readline()
1121 l2 = lr.readline()
1117 if not l2.startswith('---'):
1122 if not l2.startswith('---'):
1118 lr.push(l2)
1123 lr.push(l2)
1119 continue
1124 continue
1120 l3 = lr.readline()
1125 l3 = lr.readline()
1121 lr.push(l3)
1126 lr.push(l3)
1122 if not l3.startswith("***************"):
1127 if not l3.startswith("***************"):
1123 lr.push(l2)
1128 lr.push(l2)
1124 continue
1129 continue
1125 newfile = True
1130 newfile = True
1126 context = True
1131 context = True
1127 afile = parsefilename(x)
1132 afile = parsefilename(x)
1128 bfile = parsefilename(l2)
1133 bfile = parsefilename(l2)
1129
1134
1130 if newfile:
1135 if newfile:
1131 newfile = False
1136 newfile = False
1132 emitfile = True
1137 emitfile = True
1133 state = BFILE
1138 state = BFILE
1134 hunknum = 0
1139 hunknum = 0
1135
1140
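A minimal sketch of consuming the event stream described in the docstring above, assuming this module is importable as mercurial.patch (as it is in a Mercurial checkout); the sample diff is hypothetical.

    import cStringIO
    from mercurial import patch

    sample = ('--- a/hello.txt\n'
              '+++ b/hello.txt\n'
              '@@ -1,1 +1,1 @@\n'
              '-hello\n'
              '+hello, world\n')

    for event, data in patch.iterhunks(cStringIO.StringIO(sample)):
        if event == 'file':
            afile, bfile, firsthunk = data
            print 'file: %s -> %s' % (afile, bfile)
        else:
            print event
    # expected output for this sample: one 'file' event, then 'hunk'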
1136 def applydiff(ui, fp, changed, strip=1, eolmode='strict'):
1141 def applydiff(ui, fp, changed, strip=1, eolmode='strict'):
1137 """Reads a patch from fp and tries to apply it.
1142 """Reads a patch from fp and tries to apply it.
1138
1143
1139 The dict 'changed' is filled in with all of the filenames changed
1144 The dict 'changed' is filled in with all of the filenames changed
1140 by the patch. Returns 0 for a clean patch, -1 if any rejects were
1145 by the patch. Returns 0 for a clean patch, -1 if any rejects were
1141 found and 1 if there was any fuzz.
1146 found and 1 if there was any fuzz.
1142
1147
1143 If 'eolmode' is 'strict', the patch content and patched file are
1148 If 'eolmode' is 'strict', the patch content and patched file are
1144 read in binary mode. Otherwise, line endings are ignored when
1149 read in binary mode. Otherwise, line endings are ignored when
1145 patching, then normalized according to 'eolmode'.
1150 patching, then normalized according to 'eolmode'.
1146
1151
1147 Callers probably want to call '_updatedir' after this to
1152 Callers probably want to call '_updatedir' after this to
1148 apply certain categories of changes not done by this function.
1153 apply certain categories of changes not done by this function.
1149 """
1154 """
1150 return _applydiff(ui, fp, patchfile, copyfile, changed, strip=strip,
1155 return _applydiff(ui, fp, patchfile, changed, strip=strip,
1151 eolmode=eolmode)
1156 eolmode=eolmode)
1152
1157
1153 def _applydiff(ui, fp, patcher, copyfn, changed, strip=1, eolmode='strict'):
1158 def _applydiff(ui, fp, patcher, changed, strip=1, eolmode='strict'):
1154 rejects = 0
1159 rejects = 0
1155 err = 0
1160 err = 0
1156 current_file = None
1161 current_file = None
1157 cwd = os.getcwd()
1162 cwd = os.getcwd()
1158 backend = fsbackend(ui, scmutil.opener(cwd))
1163 backend = fsbackend(ui, os.getcwd())
1159
1164
1160 for state, values in iterhunks(fp):
1165 for state, values in iterhunks(fp):
1161 if state == 'hunk':
1166 if state == 'hunk':
1162 if not current_file:
1167 if not current_file:
1163 continue
1168 continue
1164 ret = current_file.apply(values)
1169 ret = current_file.apply(values)
1165 if ret >= 0:
1170 if ret >= 0:
1166 changed.setdefault(current_file.fname, None)
1171 changed.setdefault(current_file.fname, None)
1167 if ret > 0:
1172 if ret > 0:
1168 err = 1
1173 err = 1
1169 elif state == 'file':
1174 elif state == 'file':
1170 if current_file:
1175 if current_file:
1171 rejects += current_file.close()
1176 rejects += current_file.close()
1172 afile, bfile, first_hunk = values
1177 afile, bfile, first_hunk = values
1173 try:
1178 try:
1174 current_file, missing = selectfile(afile, bfile,
1179 current_file, missing = selectfile(afile, bfile,
1175 first_hunk, strip)
1180 first_hunk, strip)
1176 current_file = patcher(ui, current_file, backend,
1181 current_file = patcher(ui, current_file, backend,
1177 missing=missing, eolmode=eolmode)
1182 missing=missing, eolmode=eolmode)
1178 except PatchError, inst:
1183 except PatchError, inst:
1179 ui.warn(str(inst) + '\n')
1184 ui.warn(str(inst) + '\n')
1180 current_file = None
1185 current_file = None
1181 rejects += 1
1186 rejects += 1
1182 continue
1187 continue
1183 elif state == 'git':
1188 elif state == 'git':
1184 for gp in values:
1189 for gp in values:
1185 gp.path = pathstrip(gp.path, strip - 1)[1]
1190 gp.path = pathstrip(gp.path, strip - 1)[1]
1186 if gp.oldpath:
1191 if gp.oldpath:
1187 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1192 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1188 # Binary patches really overwrite target files, copying them
1193 # Binary patches really overwrite target files, copying them
1189 # will just make it fail with "target file exists"
1194 # will just make it fail with "target file exists"
1190 if gp.op in ('COPY', 'RENAME') and not gp.binary:
1195 if gp.op in ('COPY', 'RENAME') and not gp.binary:
1191 copyfn(gp.oldpath, gp.path, cwd)
1196 backend.copy(gp.oldpath, gp.path)
1192 changed[gp.path] = gp
1197 changed[gp.path] = gp
1193 else:
1198 else:
1194 raise util.Abort(_('unsupported parser state: %s') % state)
1199 raise util.Abort(_('unsupported parser state: %s') % state)
1195
1200
1196 if current_file:
1201 if current_file:
1197 rejects += current_file.close()
1202 rejects += current_file.close()
1198
1203
1199 if rejects:
1204 if rejects:
1200 return -1
1205 return -1
1201 return err
1206 return err
1202
1207
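A hedged sketch of driving applydiff() directly; callers normally go through internalpatch() or patch() below. It assumes a Mercurial checkout so that mercurial.ui and mercurial.patch import, and it patches a throwaway hello.txt in the current directory.

    import cStringIO
    from mercurial import ui as uimod, patch

    with open('hello.txt', 'w') as f:
        f.write('hello\n')

    sample = ('--- a/hello.txt\n'
              '+++ b/hello.txt\n'
              '@@ -1,1 +1,1 @@\n'
              '-hello\n'
              '+hello, world\n')

    changed = {}
    ret = patch.applydiff(uimod.ui(), cStringIO.StringIO(sample), changed)
    # ret: 0 for a clean apply, 1 if fuzz was needed, -1 if hunks were
    # rejected; 'changed' now maps 'hello.txt' to its git metadata (None
    # here, since this is a plain unified diff).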
1203 def _updatedir(ui, repo, patches, similarity=0):
1208 def _updatedir(ui, repo, patches, similarity=0):
1204 '''Update dirstate after patch application according to metadata'''
1209 '''Update dirstate after patch application according to metadata'''
1205 if not patches:
1210 if not patches:
1206 return []
1211 return []
1207 copies = []
1212 copies = []
1208 removes = set()
1213 removes = set()
1209 cfiles = patches.keys()
1214 cfiles = patches.keys()
1210 cwd = repo.getcwd()
1215 cwd = repo.getcwd()
1211 if cwd:
1216 if cwd:
1212 cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
1217 cfiles = [util.pathto(repo.root, cwd, f) for f in patches.keys()]
1213 for f in patches:
1218 for f in patches:
1214 gp = patches[f]
1219 gp = patches[f]
1215 if not gp:
1220 if not gp:
1216 continue
1221 continue
1217 if gp.op == 'RENAME':
1222 if gp.op == 'RENAME':
1218 copies.append((gp.oldpath, gp.path))
1223 copies.append((gp.oldpath, gp.path))
1219 removes.add(gp.oldpath)
1224 removes.add(gp.oldpath)
1220 elif gp.op == 'COPY':
1225 elif gp.op == 'COPY':
1221 copies.append((gp.oldpath, gp.path))
1226 copies.append((gp.oldpath, gp.path))
1222 elif gp.op == 'DELETE':
1227 elif gp.op == 'DELETE':
1223 removes.add(gp.path)
1228 removes.add(gp.path)
1224
1229
1225 wctx = repo[None]
1230 wctx = repo[None]
1226 for src, dst in copies:
1231 for src, dst in copies:
1227 scmutil.dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
1232 scmutil.dirstatecopy(ui, repo, wctx, src, dst, cwd=cwd)
1228 if (not similarity) and removes:
1233 if (not similarity) and removes:
1229 wctx.remove(sorted(removes), True)
1234 wctx.remove(sorted(removes), True)
1230
1235
1231 for f in patches:
1236 for f in patches:
1232 gp = patches[f]
1237 gp = patches[f]
1233 if gp and gp.mode:
1238 if gp and gp.mode:
1234 islink, isexec = gp.mode
1239 islink, isexec = gp.mode
1235 dst = repo.wjoin(gp.path)
1240 dst = repo.wjoin(gp.path)
1236 # patch won't create empty files
1241 # patch won't create empty files
1237 if gp.op == 'ADD' and not os.path.lexists(dst):
1242 if gp.op == 'ADD' and not os.path.lexists(dst):
1238 flags = (isexec and 'x' or '') + (islink and 'l' or '')
1243 flags = (isexec and 'x' or '') + (islink and 'l' or '')
1239 repo.wwrite(gp.path, '', flags)
1244 repo.wwrite(gp.path, '', flags)
1240 util.setflags(dst, islink, isexec)
1245 util.setflags(dst, islink, isexec)
1241 scmutil.addremove(repo, cfiles, similarity=similarity)
1246 scmutil.addremove(repo, cfiles, similarity=similarity)
1242 files = patches.keys()
1247 files = patches.keys()
1243 files.extend([r for r in removes if r not in files])
1248 files.extend([r for r in removes if r not in files])
1244 return sorted(files)
1249 return sorted(files)
1245
1250
1246 def _externalpatch(patcher, patchname, ui, strip, cwd, files):
1251 def _externalpatch(patcher, patchname, ui, strip, cwd, files):
1247 """use <patcher> to apply <patchname> to the working directory.
1252 """use <patcher> to apply <patchname> to the working directory.
1248 returns whether patch was applied with fuzz factor."""
1253 returns whether patch was applied with fuzz factor."""
1249
1254
1250 fuzz = False
1255 fuzz = False
1251 args = []
1256 args = []
1252 if cwd:
1257 if cwd:
1253 args.append('-d %s' % util.shellquote(cwd))
1258 args.append('-d %s' % util.shellquote(cwd))
1254 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1259 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1255 util.shellquote(patchname)))
1260 util.shellquote(patchname)))
1256
1261
1257 for line in fp:
1262 for line in fp:
1258 line = line.rstrip()
1263 line = line.rstrip()
1259 ui.note(line + '\n')
1264 ui.note(line + '\n')
1260 if line.startswith('patching file '):
1265 if line.startswith('patching file '):
1261 pf = util.parsepatchoutput(line)
1266 pf = util.parsepatchoutput(line)
1262 printed_file = False
1267 printed_file = False
1263 files.setdefault(pf, None)
1268 files.setdefault(pf, None)
1264 elif line.find('with fuzz') >= 0:
1269 elif line.find('with fuzz') >= 0:
1265 fuzz = True
1270 fuzz = True
1266 if not printed_file:
1271 if not printed_file:
1267 ui.warn(pf + '\n')
1272 ui.warn(pf + '\n')
1268 printed_file = True
1273 printed_file = True
1269 ui.warn(line + '\n')
1274 ui.warn(line + '\n')
1270 elif line.find('saving rejects to file') >= 0:
1275 elif line.find('saving rejects to file') >= 0:
1271 ui.warn(line + '\n')
1276 ui.warn(line + '\n')
1272 elif line.find('FAILED') >= 0:
1277 elif line.find('FAILED') >= 0:
1273 if not printed_file:
1278 if not printed_file:
1274 ui.warn(pf + '\n')
1279 ui.warn(pf + '\n')
1275 printed_file = True
1280 printed_file = True
1276 ui.warn(line + '\n')
1281 ui.warn(line + '\n')
1277 code = fp.close()
1282 code = fp.close()
1278 if code:
1283 if code:
1279 raise PatchError(_("patch command failed: %s") %
1284 raise PatchError(_("patch command failed: %s") %
1280 util.explainexit(code)[0])
1285 util.explainexit(code)[0])
1281 return fuzz
1286 return fuzz
1282
1287
1283 def internalpatch(ui, repo, patchobj, strip, cwd, files=None, eolmode='strict',
1288 def internalpatch(ui, repo, patchobj, strip, cwd, files=None, eolmode='strict',
1284 similarity=0):
1289 similarity=0):
1285 """use builtin patch to apply <patchobj> to the working directory.
1290 """use builtin patch to apply <patchobj> to the working directory.
1286 returns whether patch was applied with fuzz factor."""
1291 returns whether patch was applied with fuzz factor."""
1287
1292
1288 if files is None:
1293 if files is None:
1289 files = {}
1294 files = {}
1290 if eolmode is None:
1295 if eolmode is None:
1291 eolmode = ui.config('patch', 'eol', 'strict')
1296 eolmode = ui.config('patch', 'eol', 'strict')
1292 if eolmode.lower() not in eolmodes:
1297 if eolmode.lower() not in eolmodes:
1293 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1298 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1294 eolmode = eolmode.lower()
1299 eolmode = eolmode.lower()
1295
1300
1296 try:
1301 try:
1297 fp = open(patchobj, 'rb')
1302 fp = open(patchobj, 'rb')
1298 except TypeError:
1303 except TypeError:
1299 fp = patchobj
1304 fp = patchobj
1300 if cwd:
1305 if cwd:
1301 curdir = os.getcwd()
1306 curdir = os.getcwd()
1302 os.chdir(cwd)
1307 os.chdir(cwd)
1303 try:
1308 try:
1304 ret = applydiff(ui, fp, files, strip=strip, eolmode=eolmode)
1309 ret = applydiff(ui, fp, files, strip=strip, eolmode=eolmode)
1305 finally:
1310 finally:
1306 if cwd:
1311 if cwd:
1307 os.chdir(curdir)
1312 os.chdir(curdir)
1308 if fp != patchobj:
1313 if fp != patchobj:
1309 fp.close()
1314 fp.close()
1310 touched = _updatedir(ui, repo, files, similarity)
1315 touched = _updatedir(ui, repo, files, similarity)
1311 files.update(dict.fromkeys(touched))
1316 files.update(dict.fromkeys(touched))
1312 if ret < 0:
1317 if ret < 0:
1313 raise PatchError(_('patch failed to apply'))
1318 raise PatchError(_('patch failed to apply'))
1314 return ret > 0
1319 return ret > 0
1315
1320
1316 def patch(ui, repo, patchname, strip=1, cwd=None, files=None, eolmode='strict',
1321 def patch(ui, repo, patchname, strip=1, cwd=None, files=None, eolmode='strict',
1317 similarity=0):
1322 similarity=0):
1318 """Apply <patchname> to the working directory.
1323 """Apply <patchname> to the working directory.
1319
1324
1320 'eolmode' specifies how end of lines should be handled. It can be:
1325 'eolmode' specifies how end of lines should be handled. It can be:
1321 - 'strict': inputs are read in binary mode, EOLs are preserved
1326 - 'strict': inputs are read in binary mode, EOLs are preserved
1322 - 'crlf': EOLs are ignored when patching and reset to CRLF
1327 - 'crlf': EOLs are ignored when patching and reset to CRLF
1323 - 'lf': EOLs are ignored when patching and reset to LF
1328 - 'lf': EOLs are ignored when patching and reset to LF
1324 - None: get it from user settings, default to 'strict'
1329 - None: get it from user settings, default to 'strict'
1325 'eolmode' is ignored when using an external patcher program.
1330 'eolmode' is ignored when using an external patcher program.
1326
1331
1327 Returns whether patch was applied with fuzz factor.
1332 Returns whether patch was applied with fuzz factor.
1328 """
1333 """
1329 patcher = ui.config('ui', 'patch')
1334 patcher = ui.config('ui', 'patch')
1330 if files is None:
1335 if files is None:
1331 files = {}
1336 files = {}
1332 try:
1337 try:
1333 if patcher:
1338 if patcher:
1334 try:
1339 try:
1335 return _externalpatch(patcher, patchname, ui, strip, cwd,
1340 return _externalpatch(patcher, patchname, ui, strip, cwd,
1336 files)
1341 files)
1337 finally:
1342 finally:
1338 touched = _updatedir(ui, repo, files, similarity)
1343 touched = _updatedir(ui, repo, files, similarity)
1339 files.update(dict.fromkeys(touched))
1344 files.update(dict.fromkeys(touched))
1340 return internalpatch(ui, repo, patchname, strip, cwd, files, eolmode,
1345 return internalpatch(ui, repo, patchname, strip, cwd, files, eolmode,
1341 similarity)
1346 similarity)
1342 except PatchError, err:
1347 except PatchError, err:
1343 raise util.Abort(str(err))
1348 raise util.Abort(str(err))
1344
1349
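A hedged sketch of the top-level entry point: applying a patch file to a repository's working directory and collecting the touched files. The repository path '.' and the file name 'fix.diff' are hypothetical, and a Mercurial checkout or installation is assumed so that mercurial.hg and friends import.

    from mercurial import hg, ui as uimod, patch

    u = uimod.ui()
    repo = hg.repository(u, '.')
    files = {}
    fuzz = patch.patch(u, repo, 'fix.diff', strip=1, files=files,
                       eolmode=None)   # None: honor the [patch] eol setting
    # fuzz is True if any hunk only applied with fuzz; 'files' now lists
    # every path the patch (or its git metadata) touched.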
1345 def changedfiles(patchpath, strip=1):
1350 def changedfiles(patchpath, strip=1):
1346 fp = open(patchpath, 'rb')
1351 fp = open(patchpath, 'rb')
1347 try:
1352 try:
1348 changed = set()
1353 changed = set()
1349 for state, values in iterhunks(fp):
1354 for state, values in iterhunks(fp):
1350 if state == 'hunk':
1355 if state == 'hunk':
1351 continue
1356 continue
1352 elif state == 'file':
1357 elif state == 'file':
1353 afile, bfile, first_hunk = values
1358 afile, bfile, first_hunk = values
1354 current_file, missing = selectfile(afile, bfile,
1359 current_file, missing = selectfile(afile, bfile,
1355 first_hunk, strip)
1360 first_hunk, strip)
1356 changed.add(current_file)
1361 changed.add(current_file)
1357 elif state == 'git':
1362 elif state == 'git':
1358 for gp in values:
1363 for gp in values:
1359 gp.path = pathstrip(gp.path, strip - 1)[1]
1364 gp.path = pathstrip(gp.path, strip - 1)[1]
1360 changed.add(gp.path)
1365 changed.add(gp.path)
1361 if gp.oldpath:
1366 if gp.oldpath:
1362 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1367 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1363 if gp.op == 'RENAME':
1368 if gp.op == 'RENAME':
1364 changed.add(gp.oldpath)
1369 changed.add(gp.oldpath)
1365 else:
1370 else:
1366 raise util.Abort(_('unsupported parser state: %s') % state)
1371 raise util.Abort(_('unsupported parser state: %s') % state)
1367 return changed
1372 return changed
1368 finally:
1373 finally:
1369 fp.close()
1374 fp.close()
1370
1375
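A short sketch of using changedfiles() above to list which paths a patch would touch without applying it ('fix.diff' again being a hypothetical file name; a Mercurial checkout is assumed for the import).

    from mercurial import patch

    for f in sorted(patch.changedfiles('fix.diff')):
        print f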
1371 def b85diff(to, tn):
1376 def b85diff(to, tn):
1372 '''print base85-encoded binary diff'''
1377 '''print base85-encoded binary diff'''
1373 def gitindex(text):
1378 def gitindex(text):
1374 if not text:
1379 if not text:
1375 return hex(nullid)
1380 return hex(nullid)
1376 l = len(text)
1381 l = len(text)
1377 s = util.sha1('blob %d\0' % l)
1382 s = util.sha1('blob %d\0' % l)
1378 s.update(text)
1383 s.update(text)
1379 return s.hexdigest()
1384 return s.hexdigest()
1380
1385
1381 def fmtline(line):
1386 def fmtline(line):
1382 l = len(line)
1387 l = len(line)
1383 if l <= 26:
1388 if l <= 26:
1384 l = chr(ord('A') + l - 1)
1389 l = chr(ord('A') + l - 1)
1385 else:
1390 else:
1386 l = chr(l - 26 + ord('a') - 1)
1391 l = chr(l - 26 + ord('a') - 1)
1387 return '%c%s\n' % (l, base85.b85encode(line, True))
1392 return '%c%s\n' % (l, base85.b85encode(line, True))
1388
1393
1389 def chunk(text, csize=52):
1394 def chunk(text, csize=52):
1390 l = len(text)
1395 l = len(text)
1391 i = 0
1396 i = 0
1392 while i < l:
1397 while i < l:
1393 yield text[i:i + csize]
1398 yield text[i:i + csize]
1394 i += csize
1399 i += csize
1395
1400
1396 tohash = gitindex(to)
1401 tohash = gitindex(to)
1397 tnhash = gitindex(tn)
1402 tnhash = gitindex(tn)
1398 if tohash == tnhash:
1403 if tohash == tnhash:
1399 return ""
1404 return ""
1400
1405
1401 # TODO: deltas
1406 # TODO: deltas
1402 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1407 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1403 (tohash, tnhash, len(tn))]
1408 (tohash, tnhash, len(tn))]
1404 for l in chunk(zlib.compress(tn)):
1409 for l in chunk(zlib.compress(tn)):
1405 ret.append(fmtline(l))
1410 ret.append(fmtline(l))
1406 ret.append('\n')
1411 ret.append('\n')
1407 return ''.join(ret)
1412 return ''.join(ret)
1408
1413
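A self-contained sketch (hypothetical helper, standard library only) of the per-line length byte written by fmtline() above: lengths 1 through 26 are encoded as 'A'..'Z' and 27 through 52 as 'a'..'z', so a reader of a 'GIT binary patch' block can recover how many raw bytes each base85 line carries.

    def linelen(lengthchar):
        # inverse of fmtline()'s length encoding
        if lengthchar.isupper():
            return ord(lengthchar) - ord('A') + 1
        return ord(lengthchar) - ord('a') + 27

    assert linelen('A') == 1 and linelen('Z') == 26
    assert linelen('a') == 27 and linelen('z') == 52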
1409 class GitDiffRequired(Exception):
1414 class GitDiffRequired(Exception):
1410 pass
1415 pass
1411
1416
1412 def diffopts(ui, opts=None, untrusted=False):
1417 def diffopts(ui, opts=None, untrusted=False):
1413 def get(key, name=None, getter=ui.configbool):
1418 def get(key, name=None, getter=ui.configbool):
1414 return ((opts and opts.get(key)) or
1419 return ((opts and opts.get(key)) or
1415 getter('diff', name or key, None, untrusted=untrusted))
1420 getter('diff', name or key, None, untrusted=untrusted))
1416 return mdiff.diffopts(
1421 return mdiff.diffopts(
1417 text=opts and opts.get('text'),
1422 text=opts and opts.get('text'),
1418 git=get('git'),
1423 git=get('git'),
1419 nodates=get('nodates'),
1424 nodates=get('nodates'),
1420 showfunc=get('show_function', 'showfunc'),
1425 showfunc=get('show_function', 'showfunc'),
1421 ignorews=get('ignore_all_space', 'ignorews'),
1426 ignorews=get('ignore_all_space', 'ignorews'),
1422 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1427 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1423 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1428 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1424 context=get('unified', getter=ui.config))
1429 context=get('unified', getter=ui.config))
1425
1430
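A hedged sketch of diffopts() above: an explicit (truthy) option wins, while anything unset falls back to the [diff] section of the configuration. A Mercurial checkout is assumed for the imports.

    from mercurial import ui as uimod, patch

    u = uimod.ui()
    u.setconfig('diff', 'git', 'true')
    print patch.diffopts(u).git                          # True, from [diff] git=true
    print patch.diffopts(uimod.ui(), {'git': True}).git  # True, from explicit opts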
1426 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1431 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1427 losedatafn=None, prefix=''):
1432 losedatafn=None, prefix=''):
1428 '''yields diff of changes to files between two nodes, or node and
1433 '''yields diff of changes to files between two nodes, or node and
1429 working directory.
1434 working directory.
1430
1435
1431 if node1 is None, use first dirstate parent instead.
1436 if node1 is None, use first dirstate parent instead.
1432 if node2 is None, compare node1 with working directory.
1437 if node2 is None, compare node1 with working directory.
1433
1438
1434 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1439 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1435 every time some change cannot be represented with the current
1440 every time some change cannot be represented with the current
1436 patch format. Return False to upgrade to git patch format, True to
1441 patch format. Return False to upgrade to git patch format, True to
1437 accept the loss or raise an exception to abort the diff. It is
1442 accept the loss or raise an exception to abort the diff. It is
1438 called with the name of current file being diffed as 'fn'. If set
1443 called with the name of current file being diffed as 'fn'. If set
1439 to None, patches will always be upgraded to git format when
1444 to None, patches will always be upgraded to git format when
1440 necessary.
1445 necessary.
1441
1446
1442 prefix is a filename prefix that is prepended to all filenames on
1447 prefix is a filename prefix that is prepended to all filenames on
1443 display (used for subrepos).
1448 display (used for subrepos).
1444 '''
1449 '''
1445
1450
1446 if opts is None:
1451 if opts is None:
1447 opts = mdiff.defaultopts
1452 opts = mdiff.defaultopts
1448
1453
1449 if not node1 and not node2:
1454 if not node1 and not node2:
1450 node1 = repo.dirstate.p1()
1455 node1 = repo.dirstate.p1()
1451
1456
1452 def lrugetfilectx():
1457 def lrugetfilectx():
1453 cache = {}
1458 cache = {}
1454 order = []
1459 order = []
1455 def getfilectx(f, ctx):
1460 def getfilectx(f, ctx):
1456 fctx = ctx.filectx(f, filelog=cache.get(f))
1461 fctx = ctx.filectx(f, filelog=cache.get(f))
1457 if f not in cache:
1462 if f not in cache:
1458 if len(cache) > 20:
1463 if len(cache) > 20:
1459 del cache[order.pop(0)]
1464 del cache[order.pop(0)]
1460 cache[f] = fctx.filelog()
1465 cache[f] = fctx.filelog()
1461 else:
1466 else:
1462 order.remove(f)
1467 order.remove(f)
1463 order.append(f)
1468 order.append(f)
1464 return fctx
1469 return fctx
1465 return getfilectx
1470 return getfilectx
1466 getfilectx = lrugetfilectx()
1471 getfilectx = lrugetfilectx()
1467
1472
1468 ctx1 = repo[node1]
1473 ctx1 = repo[node1]
1469 ctx2 = repo[node2]
1474 ctx2 = repo[node2]
1470
1475
1471 if not changes:
1476 if not changes:
1472 changes = repo.status(ctx1, ctx2, match=match)
1477 changes = repo.status(ctx1, ctx2, match=match)
1473 modified, added, removed = changes[:3]
1478 modified, added, removed = changes[:3]
1474
1479
1475 if not modified and not added and not removed:
1480 if not modified and not added and not removed:
1476 return []
1481 return []
1477
1482
1478 revs = None
1483 revs = None
1479 if not repo.ui.quiet:
1484 if not repo.ui.quiet:
1480 hexfunc = repo.ui.debugflag and hex or short
1485 hexfunc = repo.ui.debugflag and hex or short
1481 revs = [hexfunc(node) for node in [node1, node2] if node]
1486 revs = [hexfunc(node) for node in [node1, node2] if node]
1482
1487
1483 copy = {}
1488 copy = {}
1484 if opts.git or opts.upgrade:
1489 if opts.git or opts.upgrade:
1485 copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]
1490 copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]
1486
1491
1487 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1492 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1488 modified, added, removed, copy, getfilectx, opts, losedata, prefix)
1493 modified, added, removed, copy, getfilectx, opts, losedata, prefix)
1489 if opts.upgrade and not opts.git:
1494 if opts.upgrade and not opts.git:
1490 try:
1495 try:
1491 def losedata(fn):
1496 def losedata(fn):
1492 if not losedatafn or not losedatafn(fn=fn):
1497 if not losedatafn or not losedatafn(fn=fn):
1493 raise GitDiffRequired()
1498 raise GitDiffRequired()
1494 # Buffer the whole output until we are sure it can be generated
1499 # Buffer the whole output until we are sure it can be generated
1495 return list(difffn(opts.copy(git=False), losedata))
1500 return list(difffn(opts.copy(git=False), losedata))
1496 except GitDiffRequired:
1501 except GitDiffRequired:
1497 return difffn(opts.copy(git=True), None)
1502 return difffn(opts.copy(git=True), None)
1498 else:
1503 else:
1499 return difffn(opts, None)
1504 return difffn(opts, None)
1500
1505
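A hedged sketch of consuming the diff() generator above to print a git-style diff of the working directory against its first parent; the repository path '.' is hypothetical and a Mercurial checkout is assumed for the imports.

    import sys
    from mercurial import hg, ui as uimod, patch

    u = uimod.ui()
    repo = hg.repository(u, '.')
    opts = patch.diffopts(u, {'git': True})
    for chunk in patch.diff(repo, opts=opts):
        sys.stdout.write(chunk)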
1501 def difflabel(func, *args, **kw):
1506 def difflabel(func, *args, **kw):
1502 '''yields 2-tuples of (output, label) based on the output of func()'''
1507 '''yields 2-tuples of (output, label) based on the output of func()'''
1503 prefixes = [('diff', 'diff.diffline'),
1508 prefixes = [('diff', 'diff.diffline'),
1504 ('copy', 'diff.extended'),
1509 ('copy', 'diff.extended'),
1505 ('rename', 'diff.extended'),
1510 ('rename', 'diff.extended'),
1506 ('old', 'diff.extended'),
1511 ('old', 'diff.extended'),
1507 ('new', 'diff.extended'),
1512 ('new', 'diff.extended'),
1508 ('deleted', 'diff.extended'),
1513 ('deleted', 'diff.extended'),
1509 ('---', 'diff.file_a'),
1514 ('---', 'diff.file_a'),
1510 ('+++', 'diff.file_b'),
1515 ('+++', 'diff.file_b'),
1511 ('@@', 'diff.hunk'),
1516 ('@@', 'diff.hunk'),
1512 ('-', 'diff.deleted'),
1517 ('-', 'diff.deleted'),
1513 ('+', 'diff.inserted')]
1518 ('+', 'diff.inserted')]
1514
1519
1515 for chunk in func(*args, **kw):
1520 for chunk in func(*args, **kw):
1516 lines = chunk.split('\n')
1521 lines = chunk.split('\n')
1517 for i, line in enumerate(lines):
1522 for i, line in enumerate(lines):
1518 if i != 0:
1523 if i != 0:
1519 yield ('\n', '')
1524 yield ('\n', '')
1520 stripline = line
1525 stripline = line
1521 if line and line[0] in '+-':
1526 if line and line[0] in '+-':
1522 # highlight trailing whitespace, but only in changed lines
1527 # highlight trailing whitespace, but only in changed lines
1523 stripline = line.rstrip()
1528 stripline = line.rstrip()
1524 for prefix, label in prefixes:
1529 for prefix, label in prefixes:
1525 if stripline.startswith(prefix):
1530 if stripline.startswith(prefix):
1526 yield (stripline, label)
1531 yield (stripline, label)
1527 break
1532 break
1528 else:
1533 else:
1529 yield (line, '')
1534 yield (line, '')
1530 if line != stripline:
1535 if line != stripline:
1531 yield (line[len(stripline):], 'diff.trailingwhitespace')
1536 yield (line[len(stripline):], 'diff.trailingwhitespace')
1532
1537
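A hedged sketch of the labelled variant: diffui() below pairs every chunk of output with a label such as 'diff.inserted', which ui.write() can map to a colour (for example via the color extension). Same hypothetical repository assumption as the sketch above.

    from mercurial import hg, ui as uimod, patch

    u = uimod.ui()
    repo = hg.repository(u, '.')
    for output, label in patch.diffui(repo):
        u.write(output, label=label)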
1533 def diffui(*args, **kw):
1538 def diffui(*args, **kw):
1534 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1539 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1535 return difflabel(diff, *args, **kw)
1540 return difflabel(diff, *args, **kw)
1536
1541
1537
1542
1538 def _addmodehdr(header, omode, nmode):
1543 def _addmodehdr(header, omode, nmode):
1539 if omode != nmode:
1544 if omode != nmode:
1540 header.append('old mode %s\n' % omode)
1545 header.append('old mode %s\n' % omode)
1541 header.append('new mode %s\n' % nmode)
1546 header.append('new mode %s\n' % nmode)
1542
1547
1543 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1548 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1544 copy, getfilectx, opts, losedatafn, prefix):
1549 copy, getfilectx, opts, losedatafn, prefix):
1545
1550
1546 def join(f):
1551 def join(f):
1547 return os.path.join(prefix, f)
1552 return os.path.join(prefix, f)
1548
1553
1549 date1 = util.datestr(ctx1.date())
1554 date1 = util.datestr(ctx1.date())
1550 man1 = ctx1.manifest()
1555 man1 = ctx1.manifest()
1551
1556
1552 gone = set()
1557 gone = set()
1553 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1558 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1554
1559
1555 copyto = dict([(v, k) for k, v in copy.items()])
1560 copyto = dict([(v, k) for k, v in copy.items()])
1556
1561
1557 if opts.git:
1562 if opts.git:
1558 revs = None
1563 revs = None
1559
1564
1560 for f in sorted(modified + added + removed):
1565 for f in sorted(modified + added + removed):
1561 to = None
1566 to = None
1562 tn = None
1567 tn = None
1563 dodiff = True
1568 dodiff = True
1564 header = []
1569 header = []
1565 if f in man1:
1570 if f in man1:
1566 to = getfilectx(f, ctx1).data()
1571 to = getfilectx(f, ctx1).data()
1567 if f not in removed:
1572 if f not in removed:
1568 tn = getfilectx(f, ctx2).data()
1573 tn = getfilectx(f, ctx2).data()
1569 a, b = f, f
1574 a, b = f, f
1570 if opts.git or losedatafn:
1575 if opts.git or losedatafn:
1571 if f in added:
1576 if f in added:
1572 mode = gitmode[ctx2.flags(f)]
1577 mode = gitmode[ctx2.flags(f)]
1573 if f in copy or f in copyto:
1578 if f in copy or f in copyto:
1574 if opts.git:
1579 if opts.git:
1575 if f in copy:
1580 if f in copy:
1576 a = copy[f]
1581 a = copy[f]
1577 else:
1582 else:
1578 a = copyto[f]
1583 a = copyto[f]
1579 omode = gitmode[man1.flags(a)]
1584 omode = gitmode[man1.flags(a)]
1580 _addmodehdr(header, omode, mode)
1585 _addmodehdr(header, omode, mode)
1581 if a in removed and a not in gone:
1586 if a in removed and a not in gone:
1582 op = 'rename'
1587 op = 'rename'
1583 gone.add(a)
1588 gone.add(a)
1584 else:
1589 else:
1585 op = 'copy'
1590 op = 'copy'
1586 header.append('%s from %s\n' % (op, join(a)))
1591 header.append('%s from %s\n' % (op, join(a)))
1587 header.append('%s to %s\n' % (op, join(f)))
1592 header.append('%s to %s\n' % (op, join(f)))
1588 to = getfilectx(a, ctx1).data()
1593 to = getfilectx(a, ctx1).data()
1589 else:
1594 else:
1590 losedatafn(f)
1595 losedatafn(f)
1591 else:
1596 else:
1592 if opts.git:
1597 if opts.git:
1593 header.append('new file mode %s\n' % mode)
1598 header.append('new file mode %s\n' % mode)
1594 elif ctx2.flags(f):
1599 elif ctx2.flags(f):
1595 losedatafn(f)
1600 losedatafn(f)
1596 # In theory, if tn was copied or renamed we should check
1601 # In theory, if tn was copied or renamed we should check
1597 # if the source is binary too but the copy record already
1602 # if the source is binary too but the copy record already
1598 # forces git mode.
1603 # forces git mode.
1599 if util.binary(tn):
1604 if util.binary(tn):
1600 if opts.git:
1605 if opts.git:
1601 dodiff = 'binary'
1606 dodiff = 'binary'
1602 else:
1607 else:
1603 losedatafn(f)
1608 losedatafn(f)
1604 if not opts.git and not tn:
1609 if not opts.git and not tn:
1605 # regular diffs cannot represent new empty file
1610 # regular diffs cannot represent new empty file
1606 losedatafn(f)
1611 losedatafn(f)
1607 elif f in removed:
1612 elif f in removed:
1608 if opts.git:
1613 if opts.git:
1609 # have we already reported a copy above?
1614 # have we already reported a copy above?
1610 if ((f in copy and copy[f] in added
1615 if ((f in copy and copy[f] in added
1611 and copyto[copy[f]] == f) or
1616 and copyto[copy[f]] == f) or
1612 (f in copyto and copyto[f] in added
1617 (f in copyto and copyto[f] in added
1613 and copy[copyto[f]] == f)):
1618 and copy[copyto[f]] == f)):
1614 dodiff = False
1619 dodiff = False
1615 else:
1620 else:
1616 header.append('deleted file mode %s\n' %
1621 header.append('deleted file mode %s\n' %
1617 gitmode[man1.flags(f)])
1622 gitmode[man1.flags(f)])
1618 elif not to or util.binary(to):
1623 elif not to or util.binary(to):
1619 # regular diffs cannot represent empty file deletion
1624 # regular diffs cannot represent empty file deletion
1620 losedatafn(f)
1625 losedatafn(f)
1621 else:
1626 else:
1622 oflag = man1.flags(f)
1627 oflag = man1.flags(f)
1623 nflag = ctx2.flags(f)
1628 nflag = ctx2.flags(f)
1624 binary = util.binary(to) or util.binary(tn)
1629 binary = util.binary(to) or util.binary(tn)
1625 if opts.git:
1630 if opts.git:
1626 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1631 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1627 if binary:
1632 if binary:
1628 dodiff = 'binary'
1633 dodiff = 'binary'
1629 elif binary or nflag != oflag:
1634 elif binary or nflag != oflag:
1630 losedatafn(f)
1635 losedatafn(f)
1631 if opts.git:
1636 if opts.git:
1632 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1637 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1633
1638
1634 if dodiff:
1639 if dodiff:
1635 if dodiff == 'binary':
1640 if dodiff == 'binary':
1636 text = b85diff(to, tn)
1641 text = b85diff(to, tn)
1637 else:
1642 else:
1638 text = mdiff.unidiff(to, date1,
1643 text = mdiff.unidiff(to, date1,
1639 # ctx2 date may be dynamic
1644 # ctx2 date may be dynamic
1640 tn, util.datestr(ctx2.date()),
1645 tn, util.datestr(ctx2.date()),
1641 join(a), join(b), revs, opts=opts)
1646 join(a), join(b), revs, opts=opts)
1642 if header and (text or len(header) > 1):
1647 if header and (text or len(header) > 1):
1643 yield ''.join(header)
1648 yield ''.join(header)
1644 if text:
1649 if text:
1645 yield text
1650 yield text
1646
1651
1647 def diffstatdata(lines):
1652 def diffstatdata(lines):
1648 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1653 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1649
1654
1650 filename, adds, removes = None, 0, 0
1655 filename, adds, removes = None, 0, 0
1651 for line in lines:
1656 for line in lines:
1652 if line.startswith('diff'):
1657 if line.startswith('diff'):
1653 if filename:
1658 if filename:
1654 isbinary = adds == 0 and removes == 0
1659 isbinary = adds == 0 and removes == 0
1655 yield (filename, adds, removes, isbinary)
1660 yield (filename, adds, removes, isbinary)
1656 # set numbers to 0 anyway when starting new file
1661 # set numbers to 0 anyway when starting new file
1657 adds, removes = 0, 0
1662 adds, removes = 0, 0
1658 if line.startswith('diff --git'):
1663 if line.startswith('diff --git'):
1659 filename = gitre.search(line).group(1)
1664 filename = gitre.search(line).group(1)
1660 elif line.startswith('diff -r'):
1665 elif line.startswith('diff -r'):
1661 # format: "diff -r ... -r ... filename"
1666 # format: "diff -r ... -r ... filename"
1662 filename = diffre.search(line).group(1)
1667 filename = diffre.search(line).group(1)
1663 elif line.startswith('+') and not line.startswith('+++'):
1668 elif line.startswith('+') and not line.startswith('+++'):
1664 adds += 1
1669 adds += 1
1665 elif line.startswith('-') and not line.startswith('---'):
1670 elif line.startswith('-') and not line.startswith('---'):
1666 removes += 1
1671 removes += 1
1667 if filename:
1672 if filename:
1668 isbinary = adds == 0 and removes == 0
1673 isbinary = adds == 0 and removes == 0
1669 yield (filename, adds, removes, isbinary)
1674 yield (filename, adds, removes, isbinary)
1670
1675
1671 def diffstat(lines, width=80, git=False):
1676 def diffstat(lines, width=80, git=False):
1672 output = []
1677 output = []
1673 stats = list(diffstatdata(lines))
1678 stats = list(diffstatdata(lines))
1674
1679
1675 maxtotal, maxname = 0, 0
1680 maxtotal, maxname = 0, 0
1676 totaladds, totalremoves = 0, 0
1681 totaladds, totalremoves = 0, 0
1677 hasbinary = False
1682 hasbinary = False
1678
1683
1679 sized = [(filename, adds, removes, isbinary, encoding.colwidth(filename))
1684 sized = [(filename, adds, removes, isbinary, encoding.colwidth(filename))
1680 for filename, adds, removes, isbinary in stats]
1685 for filename, adds, removes, isbinary in stats]
1681
1686
1682 for filename, adds, removes, isbinary, namewidth in sized:
1687 for filename, adds, removes, isbinary, namewidth in sized:
1683 totaladds += adds
1688 totaladds += adds
1684 totalremoves += removes
1689 totalremoves += removes
1685 maxname = max(maxname, namewidth)
1690 maxname = max(maxname, namewidth)
1686 maxtotal = max(maxtotal, adds + removes)
1691 maxtotal = max(maxtotal, adds + removes)
1687 if isbinary:
1692 if isbinary:
1688 hasbinary = True
1693 hasbinary = True
1689
1694
1690 countwidth = len(str(maxtotal))
1695 countwidth = len(str(maxtotal))
1691 if hasbinary and countwidth < 3:
1696 if hasbinary and countwidth < 3:
1692 countwidth = 3
1697 countwidth = 3
1693 graphwidth = width - countwidth - maxname - 6
1698 graphwidth = width - countwidth - maxname - 6
1694 if graphwidth < 10:
1699 if graphwidth < 10:
1695 graphwidth = 10
1700 graphwidth = 10
1696
1701
1697 def scale(i):
1702 def scale(i):
1698 if maxtotal <= graphwidth:
1703 if maxtotal <= graphwidth:
1699 return i
1704 return i
1700 # If diffstat runs out of room it doesn't print anything,
1705 # If diffstat runs out of room it doesn't print anything,
1701 # which isn't very useful, so always print at least one + or -
1706 # which isn't very useful, so always print at least one + or -
1702 # if there were at least some changes.
1707 # if there were at least some changes.
1703 return max(i * graphwidth // maxtotal, int(bool(i)))
1708 return max(i * graphwidth // maxtotal, int(bool(i)))
1704
1709
1705 for filename, adds, removes, isbinary, namewidth in sized:
1710 for filename, adds, removes, isbinary, namewidth in sized:
1706 if git and isbinary:
1711 if git and isbinary:
1707 count = 'Bin'
1712 count = 'Bin'
1708 else:
1713 else:
1709 count = adds + removes
1714 count = adds + removes
1710 pluses = '+' * scale(adds)
1715 pluses = '+' * scale(adds)
1711 minuses = '-' * scale(removes)
1716 minuses = '-' * scale(removes)
1712 output.append(' %s%s | %*s %s%s\n' %
1717 output.append(' %s%s | %*s %s%s\n' %
1713 (filename, ' ' * (maxname - namewidth),
1718 (filename, ' ' * (maxname - namewidth),
1714 countwidth, count,
1719 countwidth, count,
1715 pluses, minuses))
1720 pluses, minuses))
1716
1721
1717 if stats:
1722 if stats:
1718 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1723 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1719 % (len(stats), totaladds, totalremoves))
1724 % (len(stats), totaladds, totalremoves))
1720
1725
1721 return ''.join(output)
1726 return ''.join(output)
1722
1727
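A hedged sketch of producing the summary: feed the lines of a rendered diff to diffstat() above (same hypothetical repository assumption as the earlier sketches).

    from mercurial import hg, ui as uimod, patch

    u = uimod.ui()
    repo = hg.repository(u, '.')
    lines = ''.join(patch.diff(repo)).splitlines(True)
    print patch.diffstat(lines, width=70)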
1723 def diffstatui(*args, **kw):
1728 def diffstatui(*args, **kw):
1724 '''like diffstat(), but yields 2-tuples of (output, label) for
1729 '''like diffstat(), but yields 2-tuples of (output, label) for
1725 ui.write()
1730 ui.write()
1726 '''
1731 '''
1727
1732
1728 for line in diffstat(*args, **kw).splitlines():
1733 for line in diffstat(*args, **kw).splitlines():
1729 if line and line[-1] in '+-':
1734 if line and line[-1] in '+-':
1730 name, graph = line.rsplit(' ', 1)
1735 name, graph = line.rsplit(' ', 1)
1731 yield (name + ' ', '')
1736 yield (name + ' ', '')
1732 m = re.search(r'\++', graph)
1737 m = re.search(r'\++', graph)
1733 if m:
1738 if m:
1734 yield (m.group(0), 'diffstat.inserted')
1739 yield (m.group(0), 'diffstat.inserted')
1735 m = re.search(r'-+', graph)
1740 m = re.search(r'-+', graph)
1736 if m:
1741 if m:
1737 yield (m.group(0), 'diffstat.deleted')
1742 yield (m.group(0), 'diffstat.deleted')
1738 else:
1743 else:
1739 yield (line, '')
1744 yield (line, '')
1740 yield ('\n', '')
1745 yield ('\n', '')