patch: correct import of email module
Augie Fackler
r19789:b054a241 (default branch)
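The whole change in this revision is the import on line 9: the explicit import of email.Parser is replaced by a plain import of the email package. patch.py uses both email.Parser.Parser() (in mimesplit() and extract()) and email.Generator.Generator (in mimesplit()), so importing the package once covers both names. The sketch below is an editor's illustration, not part of the changeset; it assumes Python 2, where the email package exposes the old capitalized submodule names such as email.Parser and email.Generator as lazily loaded attributes.

    # sketch: a bare "import email" is enough for the two classes patch.py uses
    import cStringIO
    import email

    msg = email.Parser.Parser().parsestr('Subject: [PATCH] example\n\nbody\n')
    fp = cStringIO.StringIO()
    email.Generator.Generator(fp, mangle_from_=False).flatten(msg)
    assert fp.getvalue().startswith('Subject: [PATCH] example')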
@@ -1,1878 +1,1878 @@
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 - import cStringIO, email.Parser, os, errno, re, posixpath
9 + import cStringIO, email, os, errno, re, posixpath
10 import tempfile, zlib, shutil
10 import tempfile, zlib, shutil
11
11
12 from i18n import _
12 from i18n import _
13 from node import hex, nullid, short
13 from node import hex, nullid, short
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
15 import context
15 import context
16
16
17 gitre = re.compile('diff --git a/(.*) b/(.*)')
17 gitre = re.compile('diff --git a/(.*) b/(.*)')
18
18
19 class PatchError(Exception):
19 class PatchError(Exception):
20 pass
20 pass
21
21
22
22
23 # public functions
23 # public functions
24
24
25 def split(stream):
25 def split(stream):
26 '''return an iterator of individual patches from a stream'''
26 '''return an iterator of individual patches from a stream'''
27 def isheader(line, inheader):
27 def isheader(line, inheader):
28 if inheader and line[0] in (' ', '\t'):
28 if inheader and line[0] in (' ', '\t'):
29 # continuation
29 # continuation
30 return True
30 return True
31 if line[0] in (' ', '-', '+'):
31 if line[0] in (' ', '-', '+'):
32 # diff line - don't check for header pattern in there
32 # diff line - don't check for header pattern in there
33 return False
33 return False
34 l = line.split(': ', 1)
34 l = line.split(': ', 1)
35 return len(l) == 2 and ' ' not in l[0]
35 return len(l) == 2 and ' ' not in l[0]
36
36
37 def chunk(lines):
37 def chunk(lines):
38 return cStringIO.StringIO(''.join(lines))
38 return cStringIO.StringIO(''.join(lines))
39
39
40 def hgsplit(stream, cur):
40 def hgsplit(stream, cur):
41 inheader = True
41 inheader = True
42
42
43 for line in stream:
43 for line in stream:
44 if not line.strip():
44 if not line.strip():
45 inheader = False
45 inheader = False
46 if not inheader and line.startswith('# HG changeset patch'):
46 if not inheader and line.startswith('# HG changeset patch'):
47 yield chunk(cur)
47 yield chunk(cur)
48 cur = []
48 cur = []
49 inheader = True
49 inheader = True
50
50
51 cur.append(line)
51 cur.append(line)
52
52
53 if cur:
53 if cur:
54 yield chunk(cur)
54 yield chunk(cur)
55
55
56 def mboxsplit(stream, cur):
56 def mboxsplit(stream, cur):
57 for line in stream:
57 for line in stream:
58 if line.startswith('From '):
58 if line.startswith('From '):
59 for c in split(chunk(cur[1:])):
59 for c in split(chunk(cur[1:])):
60 yield c
60 yield c
61 cur = []
61 cur = []
62
62
63 cur.append(line)
63 cur.append(line)
64
64
65 if cur:
65 if cur:
66 for c in split(chunk(cur[1:])):
66 for c in split(chunk(cur[1:])):
67 yield c
67 yield c
68
68
69 def mimesplit(stream, cur):
69 def mimesplit(stream, cur):
70 def msgfp(m):
70 def msgfp(m):
71 fp = cStringIO.StringIO()
71 fp = cStringIO.StringIO()
72 g = email.Generator.Generator(fp, mangle_from_=False)
72 g = email.Generator.Generator(fp, mangle_from_=False)
73 g.flatten(m)
73 g.flatten(m)
74 fp.seek(0)
74 fp.seek(0)
75 return fp
75 return fp
76
76
77 for line in stream:
77 for line in stream:
78 cur.append(line)
78 cur.append(line)
79 c = chunk(cur)
79 c = chunk(cur)
80
80
81 m = email.Parser.Parser().parse(c)
81 m = email.Parser.Parser().parse(c)
82 if not m.is_multipart():
82 if not m.is_multipart():
83 yield msgfp(m)
83 yield msgfp(m)
84 else:
84 else:
85 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
85 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
86 for part in m.walk():
86 for part in m.walk():
87 ct = part.get_content_type()
87 ct = part.get_content_type()
88 if ct not in ok_types:
88 if ct not in ok_types:
89 continue
89 continue
90 yield msgfp(part)
90 yield msgfp(part)
91
91
92 def headersplit(stream, cur):
92 def headersplit(stream, cur):
93 inheader = False
93 inheader = False
94
94
95 for line in stream:
95 for line in stream:
96 if not inheader and isheader(line, inheader):
96 if not inheader and isheader(line, inheader):
97 yield chunk(cur)
97 yield chunk(cur)
98 cur = []
98 cur = []
99 inheader = True
99 inheader = True
100 if inheader and not isheader(line, inheader):
100 if inheader and not isheader(line, inheader):
101 inheader = False
101 inheader = False
102
102
103 cur.append(line)
103 cur.append(line)
104
104
105 if cur:
105 if cur:
106 yield chunk(cur)
106 yield chunk(cur)
107
107
108 def remainder(cur):
108 def remainder(cur):
109 yield chunk(cur)
109 yield chunk(cur)
110
110
111 class fiter(object):
111 class fiter(object):
112 def __init__(self, fp):
112 def __init__(self, fp):
113 self.fp = fp
113 self.fp = fp
114
114
115 def __iter__(self):
115 def __iter__(self):
116 return self
116 return self
117
117
118 def next(self):
118 def next(self):
119 l = self.fp.readline()
119 l = self.fp.readline()
120 if not l:
120 if not l:
121 raise StopIteration
121 raise StopIteration
122 return l
122 return l
123
123
124 inheader = False
124 inheader = False
125 cur = []
125 cur = []
126
126
127 mimeheaders = ['content-type']
127 mimeheaders = ['content-type']
128
128
129 if not util.safehasattr(stream, 'next'):
129 if not util.safehasattr(stream, 'next'):
130 # http responses, for example, have readline but not next
130 # http responses, for example, have readline but not next
131 stream = fiter(stream)
131 stream = fiter(stream)
132
132
133 for line in stream:
133 for line in stream:
134 cur.append(line)
134 cur.append(line)
135 if line.startswith('# HG changeset patch'):
135 if line.startswith('# HG changeset patch'):
136 return hgsplit(stream, cur)
136 return hgsplit(stream, cur)
137 elif line.startswith('From '):
137 elif line.startswith('From '):
138 return mboxsplit(stream, cur)
138 return mboxsplit(stream, cur)
139 elif isheader(line, inheader):
139 elif isheader(line, inheader):
140 inheader = True
140 inheader = True
141 if line.split(':', 1)[0].lower() in mimeheaders:
141 if line.split(':', 1)[0].lower() in mimeheaders:
142 # let email parser handle this
142 # let email parser handle this
143 return mimesplit(stream, cur)
143 return mimesplit(stream, cur)
144 elif line.startswith('--- ') and inheader:
144 elif line.startswith('--- ') and inheader:
145 # No evil headers seen by diff start, split by hand
145 # No evil headers seen by diff start, split by hand
146 return headersplit(stream, cur)
146 return headersplit(stream, cur)
147 # Not enough info, keep reading
147 # Not enough info, keep reading
148
148
149 # if we are here, we have a very plain patch
149 # if we are here, we have a very plain patch
150 return remainder(cur)
150 return remainder(cur)
151
151
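# Editor's illustration, not part of Mercurial's patch.py: a minimal sketch
# of how split() above is typically consumed. The stream can be anything
# that yields lines (plain readline()-only objects are wrapped in fiter
# internally); each yielded chunk is a cStringIO object holding one patch.
# The file name and helper name here are hypothetical.
def _example_split_usage():
    fp = open('incoming.mbox')
    for patchchunk in split(fp):
        firstline = patchchunk.readline()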
152 def extract(ui, fileobj):
152 def extract(ui, fileobj):
153 '''extract patch from data read from fileobj.
153 '''extract patch from data read from fileobj.
154
154
155 patch can be a normal patch or contained in an email message.
155 patch can be a normal patch or contained in an email message.
156
156
157 return tuple (filename, message, user, date, branch, node, p1, p2).
157 return tuple (filename, message, user, date, branch, node, p1, p2).
158 Any item in the returned tuple can be None. If filename is None,
158 Any item in the returned tuple can be None. If filename is None,
159 fileobj did not contain a patch. Caller must unlink filename when done.'''
159 fileobj did not contain a patch. Caller must unlink filename when done.'''
160
160
161 # attempt to detect the start of a patch
161 # attempt to detect the start of a patch
162 # (this heuristic is borrowed from quilt)
162 # (this heuristic is borrowed from quilt)
163 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
163 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
164 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
164 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
165 r'---[ \t].*?^\+\+\+[ \t]|'
165 r'---[ \t].*?^\+\+\+[ \t]|'
166 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
166 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
167
167
168 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
168 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
169 tmpfp = os.fdopen(fd, 'w')
169 tmpfp = os.fdopen(fd, 'w')
170 try:
170 try:
171 msg = email.Parser.Parser().parse(fileobj)
171 msg = email.Parser.Parser().parse(fileobj)
172
172
173 subject = msg['Subject']
173 subject = msg['Subject']
174 user = msg['From']
174 user = msg['From']
175 if not subject and not user:
175 if not subject and not user:
176 # Not an email, restore parsed headers if any
176 # Not an email, restore parsed headers if any
177 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
177 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
178
178
179 # should try to parse msg['Date']
179 # should try to parse msg['Date']
180 date = None
180 date = None
181 nodeid = None
181 nodeid = None
182 branch = None
182 branch = None
183 parents = []
183 parents = []
184
184
185 if subject:
185 if subject:
186 if subject.startswith('[PATCH'):
186 if subject.startswith('[PATCH'):
187 pend = subject.find(']')
187 pend = subject.find(']')
188 if pend >= 0:
188 if pend >= 0:
189 subject = subject[pend + 1:].lstrip()
189 subject = subject[pend + 1:].lstrip()
190 subject = re.sub(r'\n[ \t]+', ' ', subject)
190 subject = re.sub(r'\n[ \t]+', ' ', subject)
191 ui.debug('Subject: %s\n' % subject)
191 ui.debug('Subject: %s\n' % subject)
192 if user:
192 if user:
193 ui.debug('From: %s\n' % user)
193 ui.debug('From: %s\n' % user)
194 diffs_seen = 0
194 diffs_seen = 0
195 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
195 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
196 message = ''
196 message = ''
197 for part in msg.walk():
197 for part in msg.walk():
198 content_type = part.get_content_type()
198 content_type = part.get_content_type()
199 ui.debug('Content-Type: %s\n' % content_type)
199 ui.debug('Content-Type: %s\n' % content_type)
200 if content_type not in ok_types:
200 if content_type not in ok_types:
201 continue
201 continue
202 payload = part.get_payload(decode=True)
202 payload = part.get_payload(decode=True)
203 m = diffre.search(payload)
203 m = diffre.search(payload)
204 if m:
204 if m:
205 hgpatch = False
205 hgpatch = False
206 hgpatchheader = False
206 hgpatchheader = False
207 ignoretext = False
207 ignoretext = False
208
208
209 ui.debug('found patch at byte %d\n' % m.start(0))
209 ui.debug('found patch at byte %d\n' % m.start(0))
210 diffs_seen += 1
210 diffs_seen += 1
211 cfp = cStringIO.StringIO()
211 cfp = cStringIO.StringIO()
212 for line in payload[:m.start(0)].splitlines():
212 for line in payload[:m.start(0)].splitlines():
213 if line.startswith('# HG changeset patch') and not hgpatch:
213 if line.startswith('# HG changeset patch') and not hgpatch:
214 ui.debug('patch generated by hg export\n')
214 ui.debug('patch generated by hg export\n')
215 hgpatch = True
215 hgpatch = True
216 hgpatchheader = True
216 hgpatchheader = True
217 # drop earlier commit message content
217 # drop earlier commit message content
218 cfp.seek(0)
218 cfp.seek(0)
219 cfp.truncate()
219 cfp.truncate()
220 subject = None
220 subject = None
221 elif hgpatchheader:
221 elif hgpatchheader:
222 if line.startswith('# User '):
222 if line.startswith('# User '):
223 user = line[7:]
223 user = line[7:]
224 ui.debug('From: %s\n' % user)
224 ui.debug('From: %s\n' % user)
225 elif line.startswith("# Date "):
225 elif line.startswith("# Date "):
226 date = line[7:]
226 date = line[7:]
227 elif line.startswith("# Branch "):
227 elif line.startswith("# Branch "):
228 branch = line[9:]
228 branch = line[9:]
229 elif line.startswith("# Node ID "):
229 elif line.startswith("# Node ID "):
230 nodeid = line[10:]
230 nodeid = line[10:]
231 elif line.startswith("# Parent "):
231 elif line.startswith("# Parent "):
232 parents.append(line[9:].lstrip())
232 parents.append(line[9:].lstrip())
233 elif not line.startswith("# "):
233 elif not line.startswith("# "):
234 hgpatchheader = False
234 hgpatchheader = False
235 elif line == '---':
235 elif line == '---':
236 ignoretext = True
236 ignoretext = True
237 if not hgpatchheader and not ignoretext:
237 if not hgpatchheader and not ignoretext:
238 cfp.write(line)
238 cfp.write(line)
239 cfp.write('\n')
239 cfp.write('\n')
240 message = cfp.getvalue()
240 message = cfp.getvalue()
241 if tmpfp:
241 if tmpfp:
242 tmpfp.write(payload)
242 tmpfp.write(payload)
243 if not payload.endswith('\n'):
243 if not payload.endswith('\n'):
244 tmpfp.write('\n')
244 tmpfp.write('\n')
245 elif not diffs_seen and message and content_type == 'text/plain':
245 elif not diffs_seen and message and content_type == 'text/plain':
246 message += '\n' + payload
246 message += '\n' + payload
247 except: # re-raises
247 except: # re-raises
248 tmpfp.close()
248 tmpfp.close()
249 os.unlink(tmpname)
249 os.unlink(tmpname)
250 raise
250 raise
251
251
252 if subject and not message.startswith(subject):
252 if subject and not message.startswith(subject):
253 message = '%s\n%s' % (subject, message)
253 message = '%s\n%s' % (subject, message)
254 tmpfp.close()
254 tmpfp.close()
255 if not diffs_seen:
255 if not diffs_seen:
256 os.unlink(tmpname)
256 os.unlink(tmpname)
257 return None, message, user, date, branch, None, None, None
257 return None, message, user, date, branch, None, None, None
258 p1 = parents and parents.pop(0) or None
258 p1 = parents and parents.pop(0) or None
259 p2 = parents and parents.pop(0) or None
259 p2 = parents and parents.pop(0) or None
260 return tmpname, message, user, date, branch, nodeid, p1, p2
260 return tmpname, message, user, date, branch, nodeid, p1, p2
261
261
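# Editor's illustration, not part of Mercurial's patch.py: the calling
# convention documented in extract()'s docstring. Every element of the
# returned tuple may be None; when a diff was found, the first element
# names a temporary file that the caller must unlink. Helper name is
# hypothetical.
def _example_extract_usage(ui, fileobj):
    tmpname, message, user, date, branch, node, p1, p2 = extract(ui, fileobj)
    if tmpname is None:
        return None                  # fileobj did not contain a patch
    try:
        return message, user, date   # metadata recovered from the mail
    finally:
        os.unlink(tmpname)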
262 class patchmeta(object):
262 class patchmeta(object):
263 """Patched file metadata
263 """Patched file metadata
264
264
265 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
265 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
266 or COPY. 'path' is patched file path. 'oldpath' is set to the
266 or COPY. 'path' is patched file path. 'oldpath' is set to the
267 origin file when 'op' is either COPY or RENAME, None otherwise. If
267 origin file when 'op' is either COPY or RENAME, None otherwise. If
268 file mode is changed, 'mode' is a tuple (islink, isexec) where
268 file mode is changed, 'mode' is a tuple (islink, isexec) where
269 'islink' is True if the file is a symlink and 'isexec' is True if
269 'islink' is True if the file is a symlink and 'isexec' is True if
270 the file is executable. Otherwise, 'mode' is None.
270 the file is executable. Otherwise, 'mode' is None.
271 """
271 """
272 def __init__(self, path):
272 def __init__(self, path):
273 self.path = path
273 self.path = path
274 self.oldpath = None
274 self.oldpath = None
275 self.mode = None
275 self.mode = None
276 self.op = 'MODIFY'
276 self.op = 'MODIFY'
277 self.binary = False
277 self.binary = False
278
278
279 def setmode(self, mode):
279 def setmode(self, mode):
280 islink = mode & 020000
280 islink = mode & 020000
281 isexec = mode & 0100
281 isexec = mode & 0100
282 self.mode = (islink, isexec)
282 self.mode = (islink, isexec)
283
283
284 def copy(self):
284 def copy(self):
285 other = patchmeta(self.path)
285 other = patchmeta(self.path)
286 other.oldpath = self.oldpath
286 other.oldpath = self.oldpath
287 other.mode = self.mode
287 other.mode = self.mode
288 other.op = self.op
288 other.op = self.op
289 other.binary = self.binary
289 other.binary = self.binary
290 return other
290 return other
291
291
292 def _ispatchinga(self, afile):
292 def _ispatchinga(self, afile):
293 if afile == '/dev/null':
293 if afile == '/dev/null':
294 return self.op == 'ADD'
294 return self.op == 'ADD'
295 return afile == 'a/' + (self.oldpath or self.path)
295 return afile == 'a/' + (self.oldpath or self.path)
296
296
297 def _ispatchingb(self, bfile):
297 def _ispatchingb(self, bfile):
298 if bfile == '/dev/null':
298 if bfile == '/dev/null':
299 return self.op == 'DELETE'
299 return self.op == 'DELETE'
300 return bfile == 'b/' + self.path
300 return bfile == 'b/' + self.path
301
301
302 def ispatching(self, afile, bfile):
302 def ispatching(self, afile, bfile):
303 return self._ispatchinga(afile) and self._ispatchingb(bfile)
303 return self._ispatchinga(afile) and self._ispatchingb(bfile)
304
304
305 def __repr__(self):
305 def __repr__(self):
306 return "<patchmeta %s %r>" % (self.op, self.path)
306 return "<patchmeta %s %r>" % (self.op, self.path)
307
307
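# Editor's illustration, not part of Mercurial's patch.py: how
# readgitpatch() below fills a patchmeta for a "new file mode 100755"
# header, and what ispatching() then matches. Helper name is hypothetical.
def _example_patchmeta():
    gp = patchmeta('tools/run.sh')
    gp.op = 'ADD'
    gp.setmode(int('100755', 8))     # mode -> (islink, isexec) flags
    assert gp.ispatching('/dev/null', 'b/tools/run.sh')
    return gp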
308 def readgitpatch(lr):
308 def readgitpatch(lr):
309 """extract git-style metadata about patches from <patchname>"""
309 """extract git-style metadata about patches from <patchname>"""
310
310
311 # Filter patch for git information
311 # Filter patch for git information
312 gp = None
312 gp = None
313 gitpatches = []
313 gitpatches = []
314 for line in lr:
314 for line in lr:
315 line = line.rstrip(' \r\n')
315 line = line.rstrip(' \r\n')
316 if line.startswith('diff --git a/'):
316 if line.startswith('diff --git a/'):
317 m = gitre.match(line)
317 m = gitre.match(line)
318 if m:
318 if m:
319 if gp:
319 if gp:
320 gitpatches.append(gp)
320 gitpatches.append(gp)
321 dst = m.group(2)
321 dst = m.group(2)
322 gp = patchmeta(dst)
322 gp = patchmeta(dst)
323 elif gp:
323 elif gp:
324 if line.startswith('--- '):
324 if line.startswith('--- '):
325 gitpatches.append(gp)
325 gitpatches.append(gp)
326 gp = None
326 gp = None
327 continue
327 continue
328 if line.startswith('rename from '):
328 if line.startswith('rename from '):
329 gp.op = 'RENAME'
329 gp.op = 'RENAME'
330 gp.oldpath = line[12:]
330 gp.oldpath = line[12:]
331 elif line.startswith('rename to '):
331 elif line.startswith('rename to '):
332 gp.path = line[10:]
332 gp.path = line[10:]
333 elif line.startswith('copy from '):
333 elif line.startswith('copy from '):
334 gp.op = 'COPY'
334 gp.op = 'COPY'
335 gp.oldpath = line[10:]
335 gp.oldpath = line[10:]
336 elif line.startswith('copy to '):
336 elif line.startswith('copy to '):
337 gp.path = line[8:]
337 gp.path = line[8:]
338 elif line.startswith('deleted file'):
338 elif line.startswith('deleted file'):
339 gp.op = 'DELETE'
339 gp.op = 'DELETE'
340 elif line.startswith('new file mode '):
340 elif line.startswith('new file mode '):
341 gp.op = 'ADD'
341 gp.op = 'ADD'
342 gp.setmode(int(line[-6:], 8))
342 gp.setmode(int(line[-6:], 8))
343 elif line.startswith('new mode '):
343 elif line.startswith('new mode '):
344 gp.setmode(int(line[-6:], 8))
344 gp.setmode(int(line[-6:], 8))
345 elif line.startswith('GIT binary patch'):
345 elif line.startswith('GIT binary patch'):
346 gp.binary = True
346 gp.binary = True
347 if gp:
347 if gp:
348 gitpatches.append(gp)
348 gitpatches.append(gp)
349
349
350 return gitpatches
350 return gitpatches
351
351
352 class linereader(object):
352 class linereader(object):
353 # simple class to allow pushing lines back into the input stream
353 # simple class to allow pushing lines back into the input stream
354 def __init__(self, fp):
354 def __init__(self, fp):
355 self.fp = fp
355 self.fp = fp
356 self.buf = []
356 self.buf = []
357
357
358 def push(self, line):
358 def push(self, line):
359 if line is not None:
359 if line is not None:
360 self.buf.append(line)
360 self.buf.append(line)
361
361
362 def readline(self):
362 def readline(self):
363 if self.buf:
363 if self.buf:
364 l = self.buf[0]
364 l = self.buf[0]
365 del self.buf[0]
365 del self.buf[0]
366 return l
366 return l
367 return self.fp.readline()
367 return self.fp.readline()
368
368
369 def __iter__(self):
369 def __iter__(self):
370 while True:
370 while True:
371 l = self.readline()
371 l = self.readline()
372 if not l:
372 if not l:
373 break
373 break
374 yield l
374 yield l
375
375
376 class abstractbackend(object):
376 class abstractbackend(object):
377 def __init__(self, ui):
377 def __init__(self, ui):
378 self.ui = ui
378 self.ui = ui
379
379
380 def getfile(self, fname):
380 def getfile(self, fname):
381 """Return target file data and flags as a (data, (islink,
381 """Return target file data and flags as a (data, (islink,
382 isexec)) tuple.
382 isexec)) tuple.
383 """
383 """
384 raise NotImplementedError
384 raise NotImplementedError
385
385
386 def setfile(self, fname, data, mode, copysource):
386 def setfile(self, fname, data, mode, copysource):
387 """Write data to target file fname and set its mode. mode is a
387 """Write data to target file fname and set its mode. mode is a
388 (islink, isexec) tuple. If data is None, the file content should
388 (islink, isexec) tuple. If data is None, the file content should
389 be left unchanged. If the file is modified after being copied,
389 be left unchanged. If the file is modified after being copied,
390 copysource is set to the original file name.
390 copysource is set to the original file name.
391 """
391 """
392 raise NotImplementedError
392 raise NotImplementedError
393
393
394 def unlink(self, fname):
394 def unlink(self, fname):
395 """Unlink target file."""
395 """Unlink target file."""
396 raise NotImplementedError
396 raise NotImplementedError
397
397
398 def writerej(self, fname, failed, total, lines):
398 def writerej(self, fname, failed, total, lines):
399 """Write rejected lines for fname. total is the number of hunks
399 """Write rejected lines for fname. total is the number of hunks
400 which failed to apply and total the total number of hunks for this
400 which failed to apply and total the total number of hunks for this
401 files.
401 files.
402 """
402 """
403 pass
403 pass
404
404
405 def exists(self, fname):
405 def exists(self, fname):
406 raise NotImplementedError
406 raise NotImplementedError
407
407
408 class fsbackend(abstractbackend):
408 class fsbackend(abstractbackend):
409 def __init__(self, ui, basedir):
409 def __init__(self, ui, basedir):
410 super(fsbackend, self).__init__(ui)
410 super(fsbackend, self).__init__(ui)
411 self.opener = scmutil.opener(basedir)
411 self.opener = scmutil.opener(basedir)
412
412
413 def _join(self, f):
413 def _join(self, f):
414 return os.path.join(self.opener.base, f)
414 return os.path.join(self.opener.base, f)
415
415
416 def getfile(self, fname):
416 def getfile(self, fname):
417 path = self._join(fname)
417 path = self._join(fname)
418 if os.path.islink(path):
418 if os.path.islink(path):
419 return (os.readlink(path), (True, False))
419 return (os.readlink(path), (True, False))
420 isexec = False
420 isexec = False
421 try:
421 try:
422 isexec = os.lstat(path).st_mode & 0100 != 0
422 isexec = os.lstat(path).st_mode & 0100 != 0
423 except OSError, e:
423 except OSError, e:
424 if e.errno != errno.ENOENT:
424 if e.errno != errno.ENOENT:
425 raise
425 raise
426 return (self.opener.read(fname), (False, isexec))
426 return (self.opener.read(fname), (False, isexec))
427
427
428 def setfile(self, fname, data, mode, copysource):
428 def setfile(self, fname, data, mode, copysource):
429 islink, isexec = mode
429 islink, isexec = mode
430 if data is None:
430 if data is None:
431 util.setflags(self._join(fname), islink, isexec)
431 util.setflags(self._join(fname), islink, isexec)
432 return
432 return
433 if islink:
433 if islink:
434 self.opener.symlink(data, fname)
434 self.opener.symlink(data, fname)
435 else:
435 else:
436 self.opener.write(fname, data)
436 self.opener.write(fname, data)
437 if isexec:
437 if isexec:
438 util.setflags(self._join(fname), False, True)
438 util.setflags(self._join(fname), False, True)
439
439
440 def unlink(self, fname):
440 def unlink(self, fname):
441 util.unlinkpath(self._join(fname), ignoremissing=True)
441 util.unlinkpath(self._join(fname), ignoremissing=True)
442
442
443 def writerej(self, fname, failed, total, lines):
443 def writerej(self, fname, failed, total, lines):
444 fname = fname + ".rej"
444 fname = fname + ".rej"
445 self.ui.warn(
445 self.ui.warn(
446 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
446 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
447 (failed, total, fname))
447 (failed, total, fname))
448 fp = self.opener(fname, 'w')
448 fp = self.opener(fname, 'w')
449 fp.writelines(lines)
449 fp.writelines(lines)
450 fp.close()
450 fp.close()
451
451
452 def exists(self, fname):
452 def exists(self, fname):
453 return os.path.lexists(self._join(fname))
453 return os.path.lexists(self._join(fname))
454
454
455 class workingbackend(fsbackend):
455 class workingbackend(fsbackend):
456 def __init__(self, ui, repo, similarity):
456 def __init__(self, ui, repo, similarity):
457 super(workingbackend, self).__init__(ui, repo.root)
457 super(workingbackend, self).__init__(ui, repo.root)
458 self.repo = repo
458 self.repo = repo
459 self.similarity = similarity
459 self.similarity = similarity
460 self.removed = set()
460 self.removed = set()
461 self.changed = set()
461 self.changed = set()
462 self.copied = []
462 self.copied = []
463
463
464 def _checkknown(self, fname):
464 def _checkknown(self, fname):
465 if self.repo.dirstate[fname] == '?' and self.exists(fname):
465 if self.repo.dirstate[fname] == '?' and self.exists(fname):
466 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
466 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
467
467
468 def setfile(self, fname, data, mode, copysource):
468 def setfile(self, fname, data, mode, copysource):
469 self._checkknown(fname)
469 self._checkknown(fname)
470 super(workingbackend, self).setfile(fname, data, mode, copysource)
470 super(workingbackend, self).setfile(fname, data, mode, copysource)
471 if copysource is not None:
471 if copysource is not None:
472 self.copied.append((copysource, fname))
472 self.copied.append((copysource, fname))
473 self.changed.add(fname)
473 self.changed.add(fname)
474
474
475 def unlink(self, fname):
475 def unlink(self, fname):
476 self._checkknown(fname)
476 self._checkknown(fname)
477 super(workingbackend, self).unlink(fname)
477 super(workingbackend, self).unlink(fname)
478 self.removed.add(fname)
478 self.removed.add(fname)
479 self.changed.add(fname)
479 self.changed.add(fname)
480
480
481 def close(self):
481 def close(self):
482 wctx = self.repo[None]
482 wctx = self.repo[None]
483 changed = set(self.changed)
483 changed = set(self.changed)
484 for src, dst in self.copied:
484 for src, dst in self.copied:
485 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
485 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
486 if self.removed:
486 if self.removed:
487 wctx.forget(sorted(self.removed))
487 wctx.forget(sorted(self.removed))
488 for f in self.removed:
488 for f in self.removed:
489 if f not in self.repo.dirstate:
489 if f not in self.repo.dirstate:
490 # File was deleted and no longer belongs to the
490 # File was deleted and no longer belongs to the
491 # dirstate, it was probably marked added then
491 # dirstate, it was probably marked added then
492 # deleted, and should not be considered by
492 # deleted, and should not be considered by
493 # marktouched().
493 # marktouched().
494 changed.discard(f)
494 changed.discard(f)
495 if changed:
495 if changed:
496 scmutil.marktouched(self.repo, changed, self.similarity)
496 scmutil.marktouched(self.repo, changed, self.similarity)
497 return sorted(self.changed)
497 return sorted(self.changed)
498
498
499 class filestore(object):
499 class filestore(object):
500 def __init__(self, maxsize=None):
500 def __init__(self, maxsize=None):
501 self.opener = None
501 self.opener = None
502 self.files = {}
502 self.files = {}
503 self.created = 0
503 self.created = 0
504 self.maxsize = maxsize
504 self.maxsize = maxsize
505 if self.maxsize is None:
505 if self.maxsize is None:
506 self.maxsize = 4*(2**20)
506 self.maxsize = 4*(2**20)
507 self.size = 0
507 self.size = 0
508 self.data = {}
508 self.data = {}
509
509
510 def setfile(self, fname, data, mode, copied=None):
510 def setfile(self, fname, data, mode, copied=None):
511 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
511 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
512 self.data[fname] = (data, mode, copied)
512 self.data[fname] = (data, mode, copied)
513 self.size += len(data)
513 self.size += len(data)
514 else:
514 else:
515 if self.opener is None:
515 if self.opener is None:
516 root = tempfile.mkdtemp(prefix='hg-patch-')
516 root = tempfile.mkdtemp(prefix='hg-patch-')
517 self.opener = scmutil.opener(root)
517 self.opener = scmutil.opener(root)
518 # Avoid filename issues with these simple names
518 # Avoid filename issues with these simple names
519 fn = str(self.created)
519 fn = str(self.created)
520 self.opener.write(fn, data)
520 self.opener.write(fn, data)
521 self.created += 1
521 self.created += 1
522 self.files[fname] = (fn, mode, copied)
522 self.files[fname] = (fn, mode, copied)
523
523
524 def getfile(self, fname):
524 def getfile(self, fname):
525 if fname in self.data:
525 if fname in self.data:
526 return self.data[fname]
526 return self.data[fname]
527 if not self.opener or fname not in self.files:
527 if not self.opener or fname not in self.files:
528 raise IOError
528 raise IOError
529 fn, mode, copied = self.files[fname]
529 fn, mode, copied = self.files[fname]
530 return self.opener.read(fn), mode, copied
530 return self.opener.read(fn), mode, copied
531
531
532 def close(self):
532 def close(self):
533 if self.opener:
533 if self.opener:
534 shutil.rmtree(self.opener.base)
534 shutil.rmtree(self.opener.base)
535
535
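# Editor's illustration, not part of Mercurial's patch.py: filestore keeps
# small payloads in memory and spills anything beyond maxsize to a
# temporary directory managed by its opener; close() removes that
# directory again. Helper name is hypothetical.
def _example_filestore():
    store = filestore(maxsize=16)
    store.setfile('small', 'data', (False, False))
    store.setfile('big', 'x' * 64, (False, False))   # too big, hits disk
    data, mode, copied = store.getfile('big')
    store.close()
    return data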
536 class repobackend(abstractbackend):
536 class repobackend(abstractbackend):
537 def __init__(self, ui, repo, ctx, store):
537 def __init__(self, ui, repo, ctx, store):
538 super(repobackend, self).__init__(ui)
538 super(repobackend, self).__init__(ui)
539 self.repo = repo
539 self.repo = repo
540 self.ctx = ctx
540 self.ctx = ctx
541 self.store = store
541 self.store = store
542 self.changed = set()
542 self.changed = set()
543 self.removed = set()
543 self.removed = set()
544 self.copied = {}
544 self.copied = {}
545
545
546 def _checkknown(self, fname):
546 def _checkknown(self, fname):
547 if fname not in self.ctx:
547 if fname not in self.ctx:
548 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
548 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
549
549
550 def getfile(self, fname):
550 def getfile(self, fname):
551 try:
551 try:
552 fctx = self.ctx[fname]
552 fctx = self.ctx[fname]
553 except error.LookupError:
553 except error.LookupError:
554 raise IOError
554 raise IOError
555 flags = fctx.flags()
555 flags = fctx.flags()
556 return fctx.data(), ('l' in flags, 'x' in flags)
556 return fctx.data(), ('l' in flags, 'x' in flags)
557
557
558 def setfile(self, fname, data, mode, copysource):
558 def setfile(self, fname, data, mode, copysource):
559 if copysource:
559 if copysource:
560 self._checkknown(copysource)
560 self._checkknown(copysource)
561 if data is None:
561 if data is None:
562 data = self.ctx[fname].data()
562 data = self.ctx[fname].data()
563 self.store.setfile(fname, data, mode, copysource)
563 self.store.setfile(fname, data, mode, copysource)
564 self.changed.add(fname)
564 self.changed.add(fname)
565 if copysource:
565 if copysource:
566 self.copied[fname] = copysource
566 self.copied[fname] = copysource
567
567
568 def unlink(self, fname):
568 def unlink(self, fname):
569 self._checkknown(fname)
569 self._checkknown(fname)
570 self.removed.add(fname)
570 self.removed.add(fname)
571
571
572 def exists(self, fname):
572 def exists(self, fname):
573 return fname in self.ctx
573 return fname in self.ctx
574
574
575 def close(self):
575 def close(self):
576 return self.changed | self.removed
576 return self.changed | self.removed
577
577
578 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
578 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
579 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
579 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
580 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
580 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
581 eolmodes = ['strict', 'crlf', 'lf', 'auto']
581 eolmodes = ['strict', 'crlf', 'lf', 'auto']
582
582
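# Editor's illustration, not part of Mercurial's patch.py: what unidesc
# above captures from a unified hunk header. Groups come back as strings
# in the order (starta, lena, startb, lenb); a length group is None when
# the header omits ",len" (a one-line range). Helper name is hypothetical.
def _example_unidesc():
    assert unidesc.match('@@ -12,3 +12,4 @@').groups() == ('12', '3', '12', '4')
    assert unidesc.match('@@ -1 +1 @@').groups() == ('1', None, '1', None)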
583 class patchfile(object):
583 class patchfile(object):
584 def __init__(self, ui, gp, backend, store, eolmode='strict'):
584 def __init__(self, ui, gp, backend, store, eolmode='strict'):
585 self.fname = gp.path
585 self.fname = gp.path
586 self.eolmode = eolmode
586 self.eolmode = eolmode
587 self.eol = None
587 self.eol = None
588 self.backend = backend
588 self.backend = backend
589 self.ui = ui
589 self.ui = ui
590 self.lines = []
590 self.lines = []
591 self.exists = False
591 self.exists = False
592 self.missing = True
592 self.missing = True
593 self.mode = gp.mode
593 self.mode = gp.mode
594 self.copysource = gp.oldpath
594 self.copysource = gp.oldpath
595 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
595 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
596 self.remove = gp.op == 'DELETE'
596 self.remove = gp.op == 'DELETE'
597 try:
597 try:
598 if self.copysource is None:
598 if self.copysource is None:
599 data, mode = backend.getfile(self.fname)
599 data, mode = backend.getfile(self.fname)
600 self.exists = True
600 self.exists = True
601 else:
601 else:
602 data, mode = store.getfile(self.copysource)[:2]
602 data, mode = store.getfile(self.copysource)[:2]
603 self.exists = backend.exists(self.fname)
603 self.exists = backend.exists(self.fname)
604 self.missing = False
604 self.missing = False
605 if data:
605 if data:
606 self.lines = mdiff.splitnewlines(data)
606 self.lines = mdiff.splitnewlines(data)
607 if self.mode is None:
607 if self.mode is None:
608 self.mode = mode
608 self.mode = mode
609 if self.lines:
609 if self.lines:
610 # Normalize line endings
610 # Normalize line endings
611 if self.lines[0].endswith('\r\n'):
611 if self.lines[0].endswith('\r\n'):
612 self.eol = '\r\n'
612 self.eol = '\r\n'
613 elif self.lines[0].endswith('\n'):
613 elif self.lines[0].endswith('\n'):
614 self.eol = '\n'
614 self.eol = '\n'
615 if eolmode != 'strict':
615 if eolmode != 'strict':
616 nlines = []
616 nlines = []
617 for l in self.lines:
617 for l in self.lines:
618 if l.endswith('\r\n'):
618 if l.endswith('\r\n'):
619 l = l[:-2] + '\n'
619 l = l[:-2] + '\n'
620 nlines.append(l)
620 nlines.append(l)
621 self.lines = nlines
621 self.lines = nlines
622 except IOError:
622 except IOError:
623 if self.create:
623 if self.create:
624 self.missing = False
624 self.missing = False
625 if self.mode is None:
625 if self.mode is None:
626 self.mode = (False, False)
626 self.mode = (False, False)
627 if self.missing:
627 if self.missing:
628 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
628 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
629
629
630 self.hash = {}
630 self.hash = {}
631 self.dirty = 0
631 self.dirty = 0
632 self.offset = 0
632 self.offset = 0
633 self.skew = 0
633 self.skew = 0
634 self.rej = []
634 self.rej = []
635 self.fileprinted = False
635 self.fileprinted = False
636 self.printfile(False)
636 self.printfile(False)
637 self.hunks = 0
637 self.hunks = 0
638
638
639 def writelines(self, fname, lines, mode):
639 def writelines(self, fname, lines, mode):
640 if self.eolmode == 'auto':
640 if self.eolmode == 'auto':
641 eol = self.eol
641 eol = self.eol
642 elif self.eolmode == 'crlf':
642 elif self.eolmode == 'crlf':
643 eol = '\r\n'
643 eol = '\r\n'
644 else:
644 else:
645 eol = '\n'
645 eol = '\n'
646
646
647 if self.eolmode != 'strict' and eol and eol != '\n':
647 if self.eolmode != 'strict' and eol and eol != '\n':
648 rawlines = []
648 rawlines = []
649 for l in lines:
649 for l in lines:
650 if l and l[-1] == '\n':
650 if l and l[-1] == '\n':
651 l = l[:-1] + eol
651 l = l[:-1] + eol
652 rawlines.append(l)
652 rawlines.append(l)
653 lines = rawlines
653 lines = rawlines
654
654
655 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
655 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
656
656
657 def printfile(self, warn):
657 def printfile(self, warn):
658 if self.fileprinted:
658 if self.fileprinted:
659 return
659 return
660 if warn or self.ui.verbose:
660 if warn or self.ui.verbose:
661 self.fileprinted = True
661 self.fileprinted = True
662 s = _("patching file %s\n") % self.fname
662 s = _("patching file %s\n") % self.fname
663 if warn:
663 if warn:
664 self.ui.warn(s)
664 self.ui.warn(s)
665 else:
665 else:
666 self.ui.note(s)
666 self.ui.note(s)
667
667
668
668
669 def findlines(self, l, linenum):
669 def findlines(self, l, linenum):
670 # looks through the hash and finds candidate lines. The
670 # looks through the hash and finds candidate lines. The
671 # result is a list of line numbers sorted based on distance
671 # result is a list of line numbers sorted based on distance
672 # from linenum
672 # from linenum
673
673
674 cand = self.hash.get(l, [])
674 cand = self.hash.get(l, [])
675 if len(cand) > 1:
675 if len(cand) > 1:
676 # resort our list of potentials forward then back.
676 # resort our list of potentials forward then back.
677 cand.sort(key=lambda x: abs(x - linenum))
677 cand.sort(key=lambda x: abs(x - linenum))
678 return cand
678 return cand
679
679
680 def write_rej(self):
680 def write_rej(self):
681 # our rejects are a little different from patch(1). This always
681 # our rejects are a little different from patch(1). This always
682 # creates rejects in the same form as the original patch. A file
682 # creates rejects in the same form as the original patch. A file
683 # header is inserted so that you can run the reject through patch again
683 # header is inserted so that you can run the reject through patch again
684 # without having to type the filename.
684 # without having to type the filename.
685 if not self.rej:
685 if not self.rej:
686 return
686 return
687 base = os.path.basename(self.fname)
687 base = os.path.basename(self.fname)
688 lines = ["--- %s\n+++ %s\n" % (base, base)]
688 lines = ["--- %s\n+++ %s\n" % (base, base)]
689 for x in self.rej:
689 for x in self.rej:
690 for l in x.hunk:
690 for l in x.hunk:
691 lines.append(l)
691 lines.append(l)
692 if l[-1] != '\n':
692 if l[-1] != '\n':
693 lines.append("\n\ No newline at end of file\n")
693 lines.append("\n\ No newline at end of file\n")
694 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
694 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
695
695
696 def apply(self, h):
696 def apply(self, h):
697 if not h.complete():
697 if not h.complete():
698 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
698 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
699 (h.number, h.desc, len(h.a), h.lena, len(h.b),
699 (h.number, h.desc, len(h.a), h.lena, len(h.b),
700 h.lenb))
700 h.lenb))
701
701
702 self.hunks += 1
702 self.hunks += 1
703
703
704 if self.missing:
704 if self.missing:
705 self.rej.append(h)
705 self.rej.append(h)
706 return -1
706 return -1
707
707
708 if self.exists and self.create:
708 if self.exists and self.create:
709 if self.copysource:
709 if self.copysource:
710 self.ui.warn(_("cannot create %s: destination already "
710 self.ui.warn(_("cannot create %s: destination already "
711 "exists\n" % self.fname))
711 "exists\n" % self.fname))
712 else:
712 else:
713 self.ui.warn(_("file %s already exists\n") % self.fname)
713 self.ui.warn(_("file %s already exists\n") % self.fname)
714 self.rej.append(h)
714 self.rej.append(h)
715 return -1
715 return -1
716
716
717 if isinstance(h, binhunk):
717 if isinstance(h, binhunk):
718 if self.remove:
718 if self.remove:
719 self.backend.unlink(self.fname)
719 self.backend.unlink(self.fname)
720 else:
720 else:
721 self.lines[:] = h.new()
721 self.lines[:] = h.new()
722 self.offset += len(h.new())
722 self.offset += len(h.new())
723 self.dirty = True
723 self.dirty = True
724 return 0
724 return 0
725
725
726 horig = h
726 horig = h
727 if (self.eolmode in ('crlf', 'lf')
727 if (self.eolmode in ('crlf', 'lf')
728 or self.eolmode == 'auto' and self.eol):
728 or self.eolmode == 'auto' and self.eol):
729 # If new eols are going to be normalized, then normalize
729 # If new eols are going to be normalized, then normalize
730 # hunk data before patching. Otherwise, preserve input
730 # hunk data before patching. Otherwise, preserve input
731 # line-endings.
731 # line-endings.
732 h = h.getnormalized()
732 h = h.getnormalized()
733
733
734 # fast case first, no offsets, no fuzz
734 # fast case first, no offsets, no fuzz
735 old, oldstart, new, newstart = h.fuzzit(0, False)
735 old, oldstart, new, newstart = h.fuzzit(0, False)
736 oldstart += self.offset
736 oldstart += self.offset
737 orig_start = oldstart
737 orig_start = oldstart
738 # if there's skew we want to emit the "(offset %d lines)" even
738 # if there's skew we want to emit the "(offset %d lines)" even
739 # when the hunk cleanly applies at start + skew, so skip the
739 # when the hunk cleanly applies at start + skew, so skip the
740 # fast case code
740 # fast case code
741 if (self.skew == 0 and
741 if (self.skew == 0 and
742 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
742 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
743 if self.remove:
743 if self.remove:
744 self.backend.unlink(self.fname)
744 self.backend.unlink(self.fname)
745 else:
745 else:
746 self.lines[oldstart:oldstart + len(old)] = new
746 self.lines[oldstart:oldstart + len(old)] = new
747 self.offset += len(new) - len(old)
747 self.offset += len(new) - len(old)
748 self.dirty = True
748 self.dirty = True
749 return 0
749 return 0
750
750
751 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
751 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
752 self.hash = {}
752 self.hash = {}
753 for x, s in enumerate(self.lines):
753 for x, s in enumerate(self.lines):
754 self.hash.setdefault(s, []).append(x)
754 self.hash.setdefault(s, []).append(x)
755
755
756 for fuzzlen in xrange(3):
756 for fuzzlen in xrange(3):
757 for toponly in [True, False]:
757 for toponly in [True, False]:
758 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
758 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
759 oldstart = oldstart + self.offset + self.skew
759 oldstart = oldstart + self.offset + self.skew
760 oldstart = min(oldstart, len(self.lines))
760 oldstart = min(oldstart, len(self.lines))
761 if old:
761 if old:
762 cand = self.findlines(old[0][1:], oldstart)
762 cand = self.findlines(old[0][1:], oldstart)
763 else:
763 else:
764 # Only adding lines with no or fuzzed context, just
764 # Only adding lines with no or fuzzed context, just
765 # take the skew in account
765 # take the skew in account
766 cand = [oldstart]
766 cand = [oldstart]
767
767
768 for l in cand:
768 for l in cand:
769 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
769 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
770 self.lines[l : l + len(old)] = new
770 self.lines[l : l + len(old)] = new
771 self.offset += len(new) - len(old)
771 self.offset += len(new) - len(old)
772 self.skew = l - orig_start
772 self.skew = l - orig_start
773 self.dirty = True
773 self.dirty = True
774 offset = l - orig_start - fuzzlen
774 offset = l - orig_start - fuzzlen
775 if fuzzlen:
775 if fuzzlen:
776 msg = _("Hunk #%d succeeded at %d "
776 msg = _("Hunk #%d succeeded at %d "
777 "with fuzz %d "
777 "with fuzz %d "
778 "(offset %d lines).\n")
778 "(offset %d lines).\n")
779 self.printfile(True)
779 self.printfile(True)
780 self.ui.warn(msg %
780 self.ui.warn(msg %
781 (h.number, l + 1, fuzzlen, offset))
781 (h.number, l + 1, fuzzlen, offset))
782 else:
782 else:
783 msg = _("Hunk #%d succeeded at %d "
783 msg = _("Hunk #%d succeeded at %d "
784 "(offset %d lines).\n")
784 "(offset %d lines).\n")
785 self.ui.note(msg % (h.number, l + 1, offset))
785 self.ui.note(msg % (h.number, l + 1, offset))
786 return fuzzlen
786 return fuzzlen
787 self.printfile(True)
787 self.printfile(True)
788 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
788 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
789 self.rej.append(horig)
789 self.rej.append(horig)
790 return -1
790 return -1
791
791
792 def close(self):
792 def close(self):
793 if self.dirty:
793 if self.dirty:
794 self.writelines(self.fname, self.lines, self.mode)
794 self.writelines(self.fname, self.lines, self.mode)
795 self.write_rej()
795 self.write_rej()
796 return len(self.rej)
796 return len(self.rej)
797
797
798 class hunk(object):
798 class hunk(object):
799 def __init__(self, desc, num, lr, context):
799 def __init__(self, desc, num, lr, context):
800 self.number = num
800 self.number = num
801 self.desc = desc
801 self.desc = desc
802 self.hunk = [desc]
802 self.hunk = [desc]
803 self.a = []
803 self.a = []
804 self.b = []
804 self.b = []
805 self.starta = self.lena = None
805 self.starta = self.lena = None
806 self.startb = self.lenb = None
806 self.startb = self.lenb = None
807 if lr is not None:
807 if lr is not None:
808 if context:
808 if context:
809 self.read_context_hunk(lr)
809 self.read_context_hunk(lr)
810 else:
810 else:
811 self.read_unified_hunk(lr)
811 self.read_unified_hunk(lr)
812
812
813 def getnormalized(self):
813 def getnormalized(self):
814 """Return a copy with line endings normalized to LF."""
814 """Return a copy with line endings normalized to LF."""
815
815
816 def normalize(lines):
816 def normalize(lines):
817 nlines = []
817 nlines = []
818 for line in lines:
818 for line in lines:
819 if line.endswith('\r\n'):
819 if line.endswith('\r\n'):
820 line = line[:-2] + '\n'
820 line = line[:-2] + '\n'
821 nlines.append(line)
821 nlines.append(line)
822 return nlines
822 return nlines
823
823
824 # Dummy object, it is rebuilt manually
824 # Dummy object, it is rebuilt manually
825 nh = hunk(self.desc, self.number, None, None)
825 nh = hunk(self.desc, self.number, None, None)
826 nh.number = self.number
826 nh.number = self.number
827 nh.desc = self.desc
827 nh.desc = self.desc
828 nh.hunk = self.hunk
828 nh.hunk = self.hunk
829 nh.a = normalize(self.a)
829 nh.a = normalize(self.a)
830 nh.b = normalize(self.b)
830 nh.b = normalize(self.b)
831 nh.starta = self.starta
831 nh.starta = self.starta
832 nh.startb = self.startb
832 nh.startb = self.startb
833 nh.lena = self.lena
833 nh.lena = self.lena
834 nh.lenb = self.lenb
834 nh.lenb = self.lenb
835 return nh
835 return nh
836
836
837 def read_unified_hunk(self, lr):
837 def read_unified_hunk(self, lr):
838 m = unidesc.match(self.desc)
838 m = unidesc.match(self.desc)
839 if not m:
839 if not m:
840 raise PatchError(_("bad hunk #%d") % self.number)
840 raise PatchError(_("bad hunk #%d") % self.number)
841 self.starta, self.lena, self.startb, self.lenb = m.groups()
841 self.starta, self.lena, self.startb, self.lenb = m.groups()
842 if self.lena is None:
842 if self.lena is None:
843 self.lena = 1
843 self.lena = 1
844 else:
844 else:
845 self.lena = int(self.lena)
845 self.lena = int(self.lena)
846 if self.lenb is None:
846 if self.lenb is None:
847 self.lenb = 1
847 self.lenb = 1
848 else:
848 else:
849 self.lenb = int(self.lenb)
849 self.lenb = int(self.lenb)
850 self.starta = int(self.starta)
850 self.starta = int(self.starta)
851 self.startb = int(self.startb)
851 self.startb = int(self.startb)
852 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
852 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
853 self.b)
853 self.b)
854 # if we hit eof before finishing out the hunk, the last line will
854 # if we hit eof before finishing out the hunk, the last line will
855 # be zero length. Lets try to fix it up.
855 # be zero length. Lets try to fix it up.
856 while len(self.hunk[-1]) == 0:
856 while len(self.hunk[-1]) == 0:
857 del self.hunk[-1]
857 del self.hunk[-1]
858 del self.a[-1]
858 del self.a[-1]
859 del self.b[-1]
859 del self.b[-1]
860 self.lena -= 1
860 self.lena -= 1
861 self.lenb -= 1
861 self.lenb -= 1
862 self._fixnewline(lr)
862 self._fixnewline(lr)
863
863
864 def read_context_hunk(self, lr):
864 def read_context_hunk(self, lr):
865 self.desc = lr.readline()
865 self.desc = lr.readline()
866 m = contextdesc.match(self.desc)
866 m = contextdesc.match(self.desc)
867 if not m:
867 if not m:
868 raise PatchError(_("bad hunk #%d") % self.number)
868 raise PatchError(_("bad hunk #%d") % self.number)
869 self.starta, aend = m.groups()
869 self.starta, aend = m.groups()
870 self.starta = int(self.starta)
870 self.starta = int(self.starta)
871 if aend is None:
871 if aend is None:
872 aend = self.starta
872 aend = self.starta
873 self.lena = int(aend) - self.starta
873 self.lena = int(aend) - self.starta
874 if self.starta:
874 if self.starta:
875 self.lena += 1
875 self.lena += 1
876 for x in xrange(self.lena):
876 for x in xrange(self.lena):
877 l = lr.readline()
877 l = lr.readline()
878 if l.startswith('---'):
878 if l.startswith('---'):
879 # lines addition, old block is empty
879 # lines addition, old block is empty
880 lr.push(l)
880 lr.push(l)
881 break
881 break
882 s = l[2:]
882 s = l[2:]
883 if l.startswith('- ') or l.startswith('! '):
883 if l.startswith('- ') or l.startswith('! '):
884 u = '-' + s
884 u = '-' + s
885 elif l.startswith(' '):
885 elif l.startswith(' '):
886 u = ' ' + s
886 u = ' ' + s
887 else:
887 else:
888 raise PatchError(_("bad hunk #%d old text line %d") %
888 raise PatchError(_("bad hunk #%d old text line %d") %
889 (self.number, x))
889 (self.number, x))
890 self.a.append(u)
890 self.a.append(u)
891 self.hunk.append(u)
891 self.hunk.append(u)
892
892
893 l = lr.readline()
893 l = lr.readline()
894 if l.startswith('\ '):
894 if l.startswith('\ '):
895 s = self.a[-1][:-1]
895 s = self.a[-1][:-1]
896 self.a[-1] = s
896 self.a[-1] = s
897 self.hunk[-1] = s
897 self.hunk[-1] = s
898 l = lr.readline()
898 l = lr.readline()
899 m = contextdesc.match(l)
899 m = contextdesc.match(l)
900 if not m:
900 if not m:
901 raise PatchError(_("bad hunk #%d") % self.number)
901 raise PatchError(_("bad hunk #%d") % self.number)
902 self.startb, bend = m.groups()
902 self.startb, bend = m.groups()
903 self.startb = int(self.startb)
903 self.startb = int(self.startb)
904 if bend is None:
904 if bend is None:
905 bend = self.startb
905 bend = self.startb
906 self.lenb = int(bend) - self.startb
906 self.lenb = int(bend) - self.startb
907 if self.startb:
907 if self.startb:
908 self.lenb += 1
908 self.lenb += 1
909 hunki = 1
909 hunki = 1
910 for x in xrange(self.lenb):
910 for x in xrange(self.lenb):
911 l = lr.readline()
911 l = lr.readline()
912 if l.startswith('\ '):
912 if l.startswith('\ '):
913 # XXX: the only way to hit this is with an invalid line range.
913 # XXX: the only way to hit this is with an invalid line range.
914 # The no-eol marker is not counted in the line range, but I
914 # The no-eol marker is not counted in the line range, but I
915 # guess there are diff(1) out there which behave differently.
915 # guess there are diff(1) out there which behave differently.
916 s = self.b[-1][:-1]
916 s = self.b[-1][:-1]
917 self.b[-1] = s
917 self.b[-1] = s
918 self.hunk[hunki - 1] = s
918 self.hunk[hunki - 1] = s
919 continue
919 continue
920 if not l:
920 if not l:
921 # line deletions, new block is empty and we hit EOF
921 # line deletions, new block is empty and we hit EOF
922 lr.push(l)
922 lr.push(l)
923 break
923 break
924 s = l[2:]
924 s = l[2:]
925 if l.startswith('+ ') or l.startswith('! '):
925 if l.startswith('+ ') or l.startswith('! '):
926 u = '+' + s
926 u = '+' + s
927 elif l.startswith(' '):
927 elif l.startswith(' '):
928 u = ' ' + s
928 u = ' ' + s
929 elif len(self.b) == 0:
929 elif len(self.b) == 0:
930 # line deletions, new block is empty
930 # line deletions, new block is empty
931 lr.push(l)
931 lr.push(l)
932 break
932 break
933 else:
933 else:
934 raise PatchError(_("bad hunk #%d old text line %d") %
934 raise PatchError(_("bad hunk #%d old text line %d") %
935 (self.number, x))
935 (self.number, x))
936 self.b.append(s)
936 self.b.append(s)
937 while True:
937 while True:
938 if hunki >= len(self.hunk):
938 if hunki >= len(self.hunk):
939 h = ""
939 h = ""
940 else:
940 else:
941 h = self.hunk[hunki]
941 h = self.hunk[hunki]
942 hunki += 1
942 hunki += 1
943 if h == u:
943 if h == u:
944 break
944 break
945 elif h.startswith('-'):
945 elif h.startswith('-'):
946 continue
946 continue
947 else:
947 else:
948 self.hunk.insert(hunki - 1, u)
948 self.hunk.insert(hunki - 1, u)
949 break
949 break
950
950
951 if not self.a:
951 if not self.a:
952 # this happens when lines were only added to the hunk
952 # this happens when lines were only added to the hunk
953 for x in self.hunk:
953 for x in self.hunk:
954 if x.startswith('-') or x.startswith(' '):
954 if x.startswith('-') or x.startswith(' '):
955 self.a.append(x)
955 self.a.append(x)
956 if not self.b:
956 if not self.b:
957 # this happens when lines were only deleted from the hunk
957 # this happens when lines were only deleted from the hunk
958 for x in self.hunk:
958 for x in self.hunk:
959 if x.startswith('+') or x.startswith(' '):
959 if x.startswith('+') or x.startswith(' '):
960 self.b.append(x[1:])
960 self.b.append(x[1:])
961 # @@ -start,len +start,len @@
961 # @@ -start,len +start,len @@
962 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
962 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
963 self.startb, self.lenb)
963 self.startb, self.lenb)
964 self.hunk[0] = self.desc
964 self.hunk[0] = self.desc
965 self._fixnewline(lr)
965 self._fixnewline(lr)
966
966
967 def _fixnewline(self, lr):
967 def _fixnewline(self, lr):
968 l = lr.readline()
968 l = lr.readline()
969 if l.startswith('\ '):
969 if l.startswith('\ '):
970 diffhelpers.fix_newline(self.hunk, self.a, self.b)
970 diffhelpers.fix_newline(self.hunk, self.a, self.b)
971 else:
971 else:
972 lr.push(l)
972 lr.push(l)
973
973
974 def complete(self):
974 def complete(self):
975 return len(self.a) == self.lena and len(self.b) == self.lenb
975 return len(self.a) == self.lena and len(self.b) == self.lenb
976
976
977 def _fuzzit(self, old, new, fuzz, toponly):
977 def _fuzzit(self, old, new, fuzz, toponly):
978 # this removes context lines from the top and bottom of list 'l'. It
978 # this removes context lines from the top and bottom of list 'l'. It
979 # checks the hunk to make sure only context lines are removed, and then
979 # checks the hunk to make sure only context lines are removed, and then
980 # returns a new shortened list of lines.
980 # returns a new shortened list of lines.
981 fuzz = min(fuzz, len(old))
981 fuzz = min(fuzz, len(old))
982 if fuzz:
982 if fuzz:
983 top = 0
983 top = 0
984 bot = 0
984 bot = 0
985 hlen = len(self.hunk)
985 hlen = len(self.hunk)
986 for x in xrange(hlen - 1):
986 for x in xrange(hlen - 1):
987 # the hunk starts with the @@ line, so use x+1
987 # the hunk starts with the @@ line, so use x+1
988 if self.hunk[x + 1][0] == ' ':
988 if self.hunk[x + 1][0] == ' ':
989 top += 1
989 top += 1
990 else:
990 else:
991 break
991 break
992 if not toponly:
992 if not toponly:
993 for x in xrange(hlen - 1):
993 for x in xrange(hlen - 1):
994 if self.hunk[hlen - bot - 1][0] == ' ':
994 if self.hunk[hlen - bot - 1][0] == ' ':
995 bot += 1
995 bot += 1
996 else:
996 else:
997 break
997 break
998
998
999 bot = min(fuzz, bot)
999 bot = min(fuzz, bot)
1000 top = min(fuzz, top)
1000 top = min(fuzz, top)
1001 return old[top:len(old) - bot], new[top:len(new) - bot], top
1001 return old[top:len(old) - bot], new[top:len(new) - bot], top
1002 return old, new, 0
1002 return old, new, 0
1003
1003
1004 def fuzzit(self, fuzz, toponly):
1004 def fuzzit(self, fuzz, toponly):
1005 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1005 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1006 oldstart = self.starta + top
1006 oldstart = self.starta + top
1007 newstart = self.startb + top
1007 newstart = self.startb + top
1008 # zero length hunk ranges already have their start decremented
1008 # zero length hunk ranges already have their start decremented
1009 if self.lena and oldstart > 0:
1009 if self.lena and oldstart > 0:
1010 oldstart -= 1
1010 oldstart -= 1
1011 if self.lenb and newstart > 0:
1011 if self.lenb and newstart > 0:
1012 newstart -= 1
1012 newstart -= 1
1013 return old, oldstart, new, newstart
1013 return old, oldstart, new, newstart
1014
1014
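# Illustrative sketch, not part of the original module: it only restates what
# _fuzzit()/fuzzit() above do. With a fuzz factor of N, up to N unchanged
# context lines may be dropped from the top and bottom of a hunk before it is
# matched against the target file. The helper name and arguments below are
# hypothetical; it mirrors the trimming on plain line lists.
def _example_fuzz_trim(old, new, fuzz, topcontext, botcontext):
    # old/new: the hunk's old and new line lists
    # topcontext/botcontext: how many context lines the hunk actually has
    top = min(fuzz, topcontext)
    bot = min(fuzz, botcontext)
    return old[top:len(old) - bot], new[top:len(new) - bot], top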
1015 class binhunk(object):
1015 class binhunk(object):
1016 'A binary patch file. Only understands literals so far.'
1016 'A binary patch file. Only understands literals so far.'
1017 def __init__(self, lr, fname):
1017 def __init__(self, lr, fname):
1018 self.text = None
1018 self.text = None
1019 self.hunk = ['GIT binary patch\n']
1019 self.hunk = ['GIT binary patch\n']
1020 self._fname = fname
1020 self._fname = fname
1021 self._read(lr)
1021 self._read(lr)
1022
1022
1023 def complete(self):
1023 def complete(self):
1024 return self.text is not None
1024 return self.text is not None
1025
1025
1026 def new(self):
1026 def new(self):
1027 return [self.text]
1027 return [self.text]
1028
1028
1029 def _read(self, lr):
1029 def _read(self, lr):
1030 def getline(lr, hunk):
1030 def getline(lr, hunk):
1031 l = lr.readline()
1031 l = lr.readline()
1032 hunk.append(l)
1032 hunk.append(l)
1033 return l.rstrip('\r\n')
1033 return l.rstrip('\r\n')
1034
1034
1035 while True:
1035 while True:
1036 line = getline(lr, self.hunk)
1036 line = getline(lr, self.hunk)
1037 if not line:
1037 if not line:
1038 raise PatchError(_('could not extract "%s" binary data')
1038 raise PatchError(_('could not extract "%s" binary data')
1039 % self._fname)
1039 % self._fname)
1040 if line.startswith('literal '):
1040 if line.startswith('literal '):
1041 break
1041 break
1042 size = int(line[8:].rstrip())
1042 size = int(line[8:].rstrip())
1043 dec = []
1043 dec = []
1044 line = getline(lr, self.hunk)
1044 line = getline(lr, self.hunk)
1045 while len(line) > 1:
1045 while len(line) > 1:
1046 l = line[0]
1046 l = line[0]
1047 if l <= 'Z' and l >= 'A':
1047 if l <= 'Z' and l >= 'A':
1048 l = ord(l) - ord('A') + 1
1048 l = ord(l) - ord('A') + 1
1049 else:
1049 else:
1050 l = ord(l) - ord('a') + 27
1050 l = ord(l) - ord('a') + 27
1051 try:
1051 try:
1052 dec.append(base85.b85decode(line[1:])[:l])
1052 dec.append(base85.b85decode(line[1:])[:l])
1053 except ValueError, e:
1053 except ValueError, e:
1054 raise PatchError(_('could not decode "%s" binary patch: %s')
1054 raise PatchError(_('could not decode "%s" binary patch: %s')
1055 % (self._fname, str(e)))
1055 % (self._fname, str(e)))
1056 line = getline(lr, self.hunk)
1056 line = getline(lr, self.hunk)
1057 text = zlib.decompress(''.join(dec))
1057 text = zlib.decompress(''.join(dec))
1058 if len(text) != size:
1058 if len(text) != size:
1059 raise PatchError(_('"%s" length is %d bytes, should be %d')
1059 raise PatchError(_('"%s" length is %d bytes, should be %d')
1060 % (self._fname, len(text), size))
1060 % (self._fname, len(text), size))
1061 self.text = text
1061 self.text = text
1062
1062
1063 def parsefilename(str):
1063 def parsefilename(str):
1064 # --- filename \t|space stuff
1064 # --- filename \t|space stuff
1065 s = str[4:].rstrip('\r\n')
1065 s = str[4:].rstrip('\r\n')
1066 i = s.find('\t')
1066 i = s.find('\t')
1067 if i < 0:
1067 if i < 0:
1068 i = s.find(' ')
1068 i = s.find(' ')
1069 if i < 0:
1069 if i < 0:
1070 return s
1070 return s
1071 return s[:i]
1071 return s[:i]
1072
1072
1073 def pathstrip(path, strip):
1073 def pathstrip(path, strip):
1074 pathlen = len(path)
1074 pathlen = len(path)
1075 i = 0
1075 i = 0
1076 if strip == 0:
1076 if strip == 0:
1077 return '', path.rstrip()
1077 return '', path.rstrip()
1078 count = strip
1078 count = strip
1079 while count > 0:
1079 while count > 0:
1080 i = path.find('/', i)
1080 i = path.find('/', i)
1081 if i == -1:
1081 if i == -1:
1082 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1082 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1083 (count, strip, path))
1083 (count, strip, path))
1084 i += 1
1084 i += 1
1085 # consume '//' in the path
1085 # consume '//' in the path
1086 while i < pathlen - 1 and path[i] == '/':
1086 while i < pathlen - 1 and path[i] == '/':
1087 i += 1
1087 i += 1
1088 count -= 1
1088 count -= 1
1089 return path[:i].lstrip(), path[i:].rstrip()
1089 return path[:i].lstrip(), path[i:].rstrip()
1090
1090
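# Illustrative sketch, not part of the original module: expected behaviour of
# the two small header helpers above on typical '---'/'+++' lines. The literal
# inputs are made up for the example.
def _example_header_helpers():
    # parsefilename() drops the '--- '/'+++ ' prefix and trailing metadata
    assert parsefilename('--- a/foo/bar.c\t2013-08-01 12:00:00') == 'a/foo/bar.c'
    # pathstrip() behaves like patch(1)'s -p option: strip=1 removes the
    # leading component and returns (stripped prefix, remaining path)
    assert pathstrip('a/foo/bar.c', 1) == ('a/', 'foo/bar.c')
    # strip=0 leaves the path untouched
    assert pathstrip('a/foo/bar.c', 0) == ('', 'a/foo/bar.c')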
1091 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
1091 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
1092 nulla = afile_orig == "/dev/null"
1092 nulla = afile_orig == "/dev/null"
1093 nullb = bfile_orig == "/dev/null"
1093 nullb = bfile_orig == "/dev/null"
1094 create = nulla and hunk.starta == 0 and hunk.lena == 0
1094 create = nulla and hunk.starta == 0 and hunk.lena == 0
1095 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1095 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1096 abase, afile = pathstrip(afile_orig, strip)
1096 abase, afile = pathstrip(afile_orig, strip)
1097 gooda = not nulla and backend.exists(afile)
1097 gooda = not nulla and backend.exists(afile)
1098 bbase, bfile = pathstrip(bfile_orig, strip)
1098 bbase, bfile = pathstrip(bfile_orig, strip)
1099 if afile == bfile:
1099 if afile == bfile:
1100 goodb = gooda
1100 goodb = gooda
1101 else:
1101 else:
1102 goodb = not nullb and backend.exists(bfile)
1102 goodb = not nullb and backend.exists(bfile)
1103 missing = not goodb and not gooda and not create
1103 missing = not goodb and not gooda and not create
1104
1104
1105 # some diff programs apparently produce patches where the afile is
1105 # some diff programs apparently produce patches where the afile is
1106 # not /dev/null, but afile starts with bfile
1106 # not /dev/null, but afile starts with bfile
1107 abasedir = afile[:afile.rfind('/') + 1]
1107 abasedir = afile[:afile.rfind('/') + 1]
1108 bbasedir = bfile[:bfile.rfind('/') + 1]
1108 bbasedir = bfile[:bfile.rfind('/') + 1]
1109 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1109 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1110 and hunk.starta == 0 and hunk.lena == 0):
1110 and hunk.starta == 0 and hunk.lena == 0):
1111 create = True
1111 create = True
1112 missing = False
1112 missing = False
1113
1113
1114 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1114 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1115 # diff is between a file and its backup. In this case, the original
1115 # diff is between a file and its backup. In this case, the original
1116 # file should be patched (see original mpatch code).
1116 # file should be patched (see original mpatch code).
1117 isbackup = (abase == bbase and bfile.startswith(afile))
1117 isbackup = (abase == bbase and bfile.startswith(afile))
1118 fname = None
1118 fname = None
1119 if not missing:
1119 if not missing:
1120 if gooda and goodb:
1120 if gooda and goodb:
1121 fname = isbackup and afile or bfile
1121 fname = isbackup and afile or bfile
1122 elif gooda:
1122 elif gooda:
1123 fname = afile
1123 fname = afile
1124
1124
1125 if not fname:
1125 if not fname:
1126 if not nullb:
1126 if not nullb:
1127 fname = isbackup and afile or bfile
1127 fname = isbackup and afile or bfile
1128 elif not nulla:
1128 elif not nulla:
1129 fname = afile
1129 fname = afile
1130 else:
1130 else:
1131 raise PatchError(_("undefined source and destination files"))
1131 raise PatchError(_("undefined source and destination files"))
1132
1132
1133 gp = patchmeta(fname)
1133 gp = patchmeta(fname)
1134 if create:
1134 if create:
1135 gp.op = 'ADD'
1135 gp.op = 'ADD'
1136 elif remove:
1136 elif remove:
1137 gp.op = 'DELETE'
1137 gp.op = 'DELETE'
1138 return gp
1138 return gp
1139
1139
1140 def scangitpatch(lr, firstline):
1140 def scangitpatch(lr, firstline):
1141 """
1141 """
1142 Git patches can emit:
1142 Git patches can emit:
1143 - rename a to b
1143 - rename a to b
1144 - change b
1144 - change b
1145 - copy a to c
1145 - copy a to c
1146 - change c
1146 - change c
1147
1147
1148 We cannot apply this sequence as-is: the renamed 'a' could not be
1148 We cannot apply this sequence as-is: the renamed 'a' could not be
1149 found, because it would already have been renamed. And we cannot
1149 found, because it would already have been renamed. And we cannot
1150 copy from 'b' instead, because 'b' would already have been changed.
1150 copy from 'b' instead, because 'b' would already have been changed.
1151 So we scan the git patch for copy and rename commands up front and
1151 So we scan the git patch for copy and rename commands up front and
1152 perform the copies ahead of time.
1152 perform the copies ahead of time.
1153 """
1153 """
1154 pos = 0
1154 pos = 0
1155 try:
1155 try:
1156 pos = lr.fp.tell()
1156 pos = lr.fp.tell()
1157 fp = lr.fp
1157 fp = lr.fp
1158 except IOError:
1158 except IOError:
1159 fp = cStringIO.StringIO(lr.fp.read())
1159 fp = cStringIO.StringIO(lr.fp.read())
1160 gitlr = linereader(fp)
1160 gitlr = linereader(fp)
1161 gitlr.push(firstline)
1161 gitlr.push(firstline)
1162 gitpatches = readgitpatch(gitlr)
1162 gitpatches = readgitpatch(gitlr)
1163 fp.seek(pos)
1163 fp.seek(pos)
1164 return gitpatches
1164 return gitpatches
1165
1165
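# Illustrative sketch, not part of the original module: why scangitpatch()
# exists. The copy/rename metadata of a whole git-style patch is collected up
# front, before any hunk is applied. The patch text below is hypothetical and
# heavily truncated, and the helper name is made up for the example.
def _example_scangitpatch():
    text = ('diff --git a/old.txt b/new.txt\n'
            'rename from old.txt\n'
            'rename to new.txt\n')
    lr = linereader(cStringIO.StringIO(text))
    firstline = lr.readline()
    # returns a list of patchmeta objects describing every file touched,
    # with op set to 'RENAME'/'COPY'/'ADD'/'DELETE'/'MODIFY' as appropriate
    return scangitpatch(lr, firstline)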
1166 def iterhunks(fp):
1166 def iterhunks(fp):
1167 """Read a patch and yield the following events:
1167 """Read a patch and yield the following events:
1168 - ("file", afile, bfile, firsthunk): select a new target file.
1168 - ("file", afile, bfile, firsthunk): select a new target file.
1169 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1169 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1170 "file" event.
1170 "file" event.
1171 - ("git", gitchanges): current diff is in git format, gitchanges
1171 - ("git", gitchanges): current diff is in git format, gitchanges
1172 maps filenames to gitpatch records. Unique event.
1172 maps filenames to gitpatch records. Unique event.
1173 """
1173 """
1174 afile = ""
1174 afile = ""
1175 bfile = ""
1175 bfile = ""
1176 state = None
1176 state = None
1177 hunknum = 0
1177 hunknum = 0
1178 emitfile = newfile = False
1178 emitfile = newfile = False
1179 gitpatches = None
1179 gitpatches = None
1180
1180
1181 # our states
1181 # our states
1182 BFILE = 1
1182 BFILE = 1
1183 context = None
1183 context = None
1184 lr = linereader(fp)
1184 lr = linereader(fp)
1185
1185
1186 while True:
1186 while True:
1187 x = lr.readline()
1187 x = lr.readline()
1188 if not x:
1188 if not x:
1189 break
1189 break
1190 if state == BFILE and (
1190 if state == BFILE and (
1191 (not context and x[0] == '@')
1191 (not context and x[0] == '@')
1192 or (context is not False and x.startswith('***************'))
1192 or (context is not False and x.startswith('***************'))
1193 or x.startswith('GIT binary patch')):
1193 or x.startswith('GIT binary patch')):
1194 gp = None
1194 gp = None
1195 if (gitpatches and
1195 if (gitpatches and
1196 gitpatches[-1].ispatching(afile, bfile)):
1196 gitpatches[-1].ispatching(afile, bfile)):
1197 gp = gitpatches.pop()
1197 gp = gitpatches.pop()
1198 if x.startswith('GIT binary patch'):
1198 if x.startswith('GIT binary patch'):
1199 h = binhunk(lr, gp.path)
1199 h = binhunk(lr, gp.path)
1200 else:
1200 else:
1201 if context is None and x.startswith('***************'):
1201 if context is None and x.startswith('***************'):
1202 context = True
1202 context = True
1203 h = hunk(x, hunknum + 1, lr, context)
1203 h = hunk(x, hunknum + 1, lr, context)
1204 hunknum += 1
1204 hunknum += 1
1205 if emitfile:
1205 if emitfile:
1206 emitfile = False
1206 emitfile = False
1207 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1207 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1208 yield 'hunk', h
1208 yield 'hunk', h
1209 elif x.startswith('diff --git a/'):
1209 elif x.startswith('diff --git a/'):
1210 m = gitre.match(x.rstrip(' \r\n'))
1210 m = gitre.match(x.rstrip(' \r\n'))
1211 if not m:
1211 if not m:
1212 continue
1212 continue
1213 if gitpatches is None:
1213 if gitpatches is None:
1214 # scan whole input for git metadata
1214 # scan whole input for git metadata
1215 gitpatches = scangitpatch(lr, x)
1215 gitpatches = scangitpatch(lr, x)
1216 yield 'git', [g.copy() for g in gitpatches
1216 yield 'git', [g.copy() for g in gitpatches
1217 if g.op in ('COPY', 'RENAME')]
1217 if g.op in ('COPY', 'RENAME')]
1218 gitpatches.reverse()
1218 gitpatches.reverse()
1219 afile = 'a/' + m.group(1)
1219 afile = 'a/' + m.group(1)
1220 bfile = 'b/' + m.group(2)
1220 bfile = 'b/' + m.group(2)
1221 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1221 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1222 gp = gitpatches.pop()
1222 gp = gitpatches.pop()
1223 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1223 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1224 if not gitpatches:
1224 if not gitpatches:
1225 raise PatchError(_('failed to synchronize metadata for "%s"')
1225 raise PatchError(_('failed to synchronize metadata for "%s"')
1226 % afile[2:])
1226 % afile[2:])
1227 gp = gitpatches[-1]
1227 gp = gitpatches[-1]
1228 newfile = True
1228 newfile = True
1229 elif x.startswith('---'):
1229 elif x.startswith('---'):
1230 # check for a unified diff
1230 # check for a unified diff
1231 l2 = lr.readline()
1231 l2 = lr.readline()
1232 if not l2.startswith('+++'):
1232 if not l2.startswith('+++'):
1233 lr.push(l2)
1233 lr.push(l2)
1234 continue
1234 continue
1235 newfile = True
1235 newfile = True
1236 context = False
1236 context = False
1237 afile = parsefilename(x)
1237 afile = parsefilename(x)
1238 bfile = parsefilename(l2)
1238 bfile = parsefilename(l2)
1239 elif x.startswith('***'):
1239 elif x.startswith('***'):
1240 # check for a context diff
1240 # check for a context diff
1241 l2 = lr.readline()
1241 l2 = lr.readline()
1242 if not l2.startswith('---'):
1242 if not l2.startswith('---'):
1243 lr.push(l2)
1243 lr.push(l2)
1244 continue
1244 continue
1245 l3 = lr.readline()
1245 l3 = lr.readline()
1246 lr.push(l3)
1246 lr.push(l3)
1247 if not l3.startswith("***************"):
1247 if not l3.startswith("***************"):
1248 lr.push(l2)
1248 lr.push(l2)
1249 continue
1249 continue
1250 newfile = True
1250 newfile = True
1251 context = True
1251 context = True
1252 afile = parsefilename(x)
1252 afile = parsefilename(x)
1253 bfile = parsefilename(l2)
1253 bfile = parsefilename(l2)
1254
1254
1255 if newfile:
1255 if newfile:
1256 newfile = False
1256 newfile = False
1257 emitfile = True
1257 emitfile = True
1258 state = BFILE
1258 state = BFILE
1259 hunknum = 0
1259 hunknum = 0
1260
1260
1261 while gitpatches:
1261 while gitpatches:
1262 gp = gitpatches.pop()
1262 gp = gitpatches.pop()
1263 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1263 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1264
1264
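# Illustrative sketch, not part of the original module: how the event protocol
# documented in iterhunks() above is consumed. The dispatch mirrors what
# _applydiff() and changedfiles() below do with the same events; the helper
# name is hypothetical.
def _example_iterhunks(fp):
    touched = set()
    for state, values in iterhunks(fp):
        if state == 'file':
            # a new target file: afile/bfile come from the ---/+++ or
            # 'diff --git' lines, gp is git metadata (or None)
            afile, bfile, firsthunk, gp = values
            touched.add(bfile)
        elif state == 'hunk':
            # values is a hunk (or binhunk) ready to be applied
            pass
        elif state == 'git':
            # emitted once, listing copies/renames found in a git patch
            pass
    return touched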
1265 def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
1265 def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
1266 """Reads a patch from fp and tries to apply it.
1266 """Reads a patch from fp and tries to apply it.
1267
1267
1268 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1268 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1269 there was any fuzz.
1269 there was any fuzz.
1270
1270
1271 If 'eolmode' is 'strict', the patch content and patched file are
1271 If 'eolmode' is 'strict', the patch content and patched file are
1272 read in binary mode. Otherwise, line endings are ignored when
1272 read in binary mode. Otherwise, line endings are ignored when
1273 patching then normalized according to 'eolmode'.
1273 patching then normalized according to 'eolmode'.
1274 """
1274 """
1275 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1275 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1276 eolmode=eolmode)
1276 eolmode=eolmode)
1277
1277
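# Illustrative sketch, not part of the original module: how applydiff()'s
# return value is meant to be read. 'ui', 'fp' and 'backend' stand for objects
# the caller already has; see patchbackend() below for the real call site.
def _example_applydiff(ui, fp, backend):
    store = filestore()
    try:
        ret = applydiff(ui, fp, backend, store, strip=1, eolmode='strict')
    finally:
        store.close()
    if ret < 0:
        ui.warn('some hunks were rejected\n')
    elif ret > 0:
        ui.status('applied with fuzz\n')
    return ret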
1278 def _applydiff(ui, fp, patcher, backend, store, strip=1,
1278 def _applydiff(ui, fp, patcher, backend, store, strip=1,
1279 eolmode='strict'):
1279 eolmode='strict'):
1280
1280
1281 def pstrip(p):
1281 def pstrip(p):
1282 return pathstrip(p, strip - 1)[1]
1282 return pathstrip(p, strip - 1)[1]
1283
1283
1284 rejects = 0
1284 rejects = 0
1285 err = 0
1285 err = 0
1286 current_file = None
1286 current_file = None
1287
1287
1288 for state, values in iterhunks(fp):
1288 for state, values in iterhunks(fp):
1289 if state == 'hunk':
1289 if state == 'hunk':
1290 if not current_file:
1290 if not current_file:
1291 continue
1291 continue
1292 ret = current_file.apply(values)
1292 ret = current_file.apply(values)
1293 if ret > 0:
1293 if ret > 0:
1294 err = 1
1294 err = 1
1295 elif state == 'file':
1295 elif state == 'file':
1296 if current_file:
1296 if current_file:
1297 rejects += current_file.close()
1297 rejects += current_file.close()
1298 current_file = None
1298 current_file = None
1299 afile, bfile, first_hunk, gp = values
1299 afile, bfile, first_hunk, gp = values
1300 if gp:
1300 if gp:
1301 gp.path = pstrip(gp.path)
1301 gp.path = pstrip(gp.path)
1302 if gp.oldpath:
1302 if gp.oldpath:
1303 gp.oldpath = pstrip(gp.oldpath)
1303 gp.oldpath = pstrip(gp.oldpath)
1304 else:
1304 else:
1305 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1305 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1306 if gp.op == 'RENAME':
1306 if gp.op == 'RENAME':
1307 backend.unlink(gp.oldpath)
1307 backend.unlink(gp.oldpath)
1308 if not first_hunk:
1308 if not first_hunk:
1309 if gp.op == 'DELETE':
1309 if gp.op == 'DELETE':
1310 backend.unlink(gp.path)
1310 backend.unlink(gp.path)
1311 continue
1311 continue
1312 data, mode = None, None
1312 data, mode = None, None
1313 if gp.op in ('RENAME', 'COPY'):
1313 if gp.op in ('RENAME', 'COPY'):
1314 data, mode = store.getfile(gp.oldpath)[:2]
1314 data, mode = store.getfile(gp.oldpath)[:2]
1315 if gp.mode:
1315 if gp.mode:
1316 mode = gp.mode
1316 mode = gp.mode
1317 if gp.op == 'ADD':
1317 if gp.op == 'ADD':
1318 # Added files without content have no hunk and
1318 # Added files without content have no hunk and
1319 # must be created
1319 # must be created
1320 data = ''
1320 data = ''
1321 if data or mode:
1321 if data or mode:
1322 if (gp.op in ('ADD', 'RENAME', 'COPY')
1322 if (gp.op in ('ADD', 'RENAME', 'COPY')
1323 and backend.exists(gp.path)):
1323 and backend.exists(gp.path)):
1324 raise PatchError(_("cannot create %s: destination "
1324 raise PatchError(_("cannot create %s: destination "
1325 "already exists") % gp.path)
1325 "already exists") % gp.path)
1326 backend.setfile(gp.path, data, mode, gp.oldpath)
1326 backend.setfile(gp.path, data, mode, gp.oldpath)
1327 continue
1327 continue
1328 try:
1328 try:
1329 current_file = patcher(ui, gp, backend, store,
1329 current_file = patcher(ui, gp, backend, store,
1330 eolmode=eolmode)
1330 eolmode=eolmode)
1331 except PatchError, inst:
1331 except PatchError, inst:
1332 ui.warn(str(inst) + '\n')
1332 ui.warn(str(inst) + '\n')
1333 current_file = None
1333 current_file = None
1334 rejects += 1
1334 rejects += 1
1335 continue
1335 continue
1336 elif state == 'git':
1336 elif state == 'git':
1337 for gp in values:
1337 for gp in values:
1338 path = pstrip(gp.oldpath)
1338 path = pstrip(gp.oldpath)
1339 try:
1339 try:
1340 data, mode = backend.getfile(path)
1340 data, mode = backend.getfile(path)
1341 except IOError, e:
1341 except IOError, e:
1342 if e.errno != errno.ENOENT:
1342 if e.errno != errno.ENOENT:
1343 raise
1343 raise
1344 # The error ignored here will trigger a getfile()
1344 # The error ignored here will trigger a getfile()
1345 # error in a place more appropriate for error
1345 # error in a place more appropriate for error
1346 # handling, and will not interrupt the patching
1346 # handling, and will not interrupt the patching
1347 # process.
1347 # process.
1348 else:
1348 else:
1349 store.setfile(path, data, mode)
1349 store.setfile(path, data, mode)
1350 else:
1350 else:
1351 raise util.Abort(_('unsupported parser state: %s') % state)
1351 raise util.Abort(_('unsupported parser state: %s') % state)
1352
1352
1353 if current_file:
1353 if current_file:
1354 rejects += current_file.close()
1354 rejects += current_file.close()
1355
1355
1356 if rejects:
1356 if rejects:
1357 return -1
1357 return -1
1358 return err
1358 return err
1359
1359
1360 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1360 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1361 similarity):
1361 similarity):
1362 """use <patcher> to apply <patchname> to the working directory.
1362 """use <patcher> to apply <patchname> to the working directory.
1363 returns whether patch was applied with fuzz factor."""
1363 returns whether patch was applied with fuzz factor."""
1364
1364
1365 fuzz = False
1365 fuzz = False
1366 args = []
1366 args = []
1367 cwd = repo.root
1367 cwd = repo.root
1368 if cwd:
1368 if cwd:
1369 args.append('-d %s' % util.shellquote(cwd))
1369 args.append('-d %s' % util.shellquote(cwd))
1370 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1370 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1371 util.shellquote(patchname)))
1371 util.shellquote(patchname)))
1372 try:
1372 try:
1373 for line in fp:
1373 for line in fp:
1374 line = line.rstrip()
1374 line = line.rstrip()
1375 ui.note(line + '\n')
1375 ui.note(line + '\n')
1376 if line.startswith('patching file '):
1376 if line.startswith('patching file '):
1377 pf = util.parsepatchoutput(line)
1377 pf = util.parsepatchoutput(line)
1378 printed_file = False
1378 printed_file = False
1379 files.add(pf)
1379 files.add(pf)
1380 elif line.find('with fuzz') >= 0:
1380 elif line.find('with fuzz') >= 0:
1381 fuzz = True
1381 fuzz = True
1382 if not printed_file:
1382 if not printed_file:
1383 ui.warn(pf + '\n')
1383 ui.warn(pf + '\n')
1384 printed_file = True
1384 printed_file = True
1385 ui.warn(line + '\n')
1385 ui.warn(line + '\n')
1386 elif line.find('saving rejects to file') >= 0:
1386 elif line.find('saving rejects to file') >= 0:
1387 ui.warn(line + '\n')
1387 ui.warn(line + '\n')
1388 elif line.find('FAILED') >= 0:
1388 elif line.find('FAILED') >= 0:
1389 if not printed_file:
1389 if not printed_file:
1390 ui.warn(pf + '\n')
1390 ui.warn(pf + '\n')
1391 printed_file = True
1391 printed_file = True
1392 ui.warn(line + '\n')
1392 ui.warn(line + '\n')
1393 finally:
1393 finally:
1394 if files:
1394 if files:
1395 scmutil.marktouched(repo, files, similarity)
1395 scmutil.marktouched(repo, files, similarity)
1396 code = fp.close()
1396 code = fp.close()
1397 if code:
1397 if code:
1398 raise PatchError(_("patch command failed: %s") %
1398 raise PatchError(_("patch command failed: %s") %
1399 util.explainexit(code)[0])
1399 util.explainexit(code)[0])
1400 return fuzz
1400 return fuzz
1401
1401
1402 def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
1402 def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
1403 if files is None:
1403 if files is None:
1404 files = set()
1404 files = set()
1405 if eolmode is None:
1405 if eolmode is None:
1406 eolmode = ui.config('patch', 'eol', 'strict')
1406 eolmode = ui.config('patch', 'eol', 'strict')
1407 if eolmode.lower() not in eolmodes:
1407 if eolmode.lower() not in eolmodes:
1408 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1408 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1409 eolmode = eolmode.lower()
1409 eolmode = eolmode.lower()
1410
1410
1411 store = filestore()
1411 store = filestore()
1412 try:
1412 try:
1413 fp = open(patchobj, 'rb')
1413 fp = open(patchobj, 'rb')
1414 except TypeError:
1414 except TypeError:
1415 fp = patchobj
1415 fp = patchobj
1416 try:
1416 try:
1417 ret = applydiff(ui, fp, backend, store, strip=strip,
1417 ret = applydiff(ui, fp, backend, store, strip=strip,
1418 eolmode=eolmode)
1418 eolmode=eolmode)
1419 finally:
1419 finally:
1420 if fp != patchobj:
1420 if fp != patchobj:
1421 fp.close()
1421 fp.close()
1422 files.update(backend.close())
1422 files.update(backend.close())
1423 store.close()
1423 store.close()
1424 if ret < 0:
1424 if ret < 0:
1425 raise PatchError(_('patch failed to apply'))
1425 raise PatchError(_('patch failed to apply'))
1426 return ret > 0
1426 return ret > 0
1427
1427
1428 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1428 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1429 similarity=0):
1429 similarity=0):
1430 """use builtin patch to apply <patchobj> to the working directory.
1430 """use builtin patch to apply <patchobj> to the working directory.
1431 returns whether patch was applied with fuzz factor."""
1431 returns whether patch was applied with fuzz factor."""
1432 backend = workingbackend(ui, repo, similarity)
1432 backend = workingbackend(ui, repo, similarity)
1433 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1433 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1434
1434
1435 def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
1435 def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
1436 eolmode='strict'):
1436 eolmode='strict'):
1437 backend = repobackend(ui, repo, ctx, store)
1437 backend = repobackend(ui, repo, ctx, store)
1438 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1438 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1439
1439
1440 def makememctx(repo, parents, text, user, date, branch, files, store,
1440 def makememctx(repo, parents, text, user, date, branch, files, store,
1441 editor=None):
1441 editor=None):
1442 def getfilectx(repo, memctx, path):
1442 def getfilectx(repo, memctx, path):
1443 data, (islink, isexec), copied = store.getfile(path)
1443 data, (islink, isexec), copied = store.getfile(path)
1444 return context.memfilectx(path, data, islink=islink, isexec=isexec,
1444 return context.memfilectx(path, data, islink=islink, isexec=isexec,
1445 copied=copied)
1445 copied=copied)
1446 extra = {}
1446 extra = {}
1447 if branch:
1447 if branch:
1448 extra['branch'] = encoding.fromlocal(branch)
1448 extra['branch'] = encoding.fromlocal(branch)
1449 ctx = context.memctx(repo, parents, text, files, getfilectx, user,
1449 ctx = context.memctx(repo, parents, text, files, getfilectx, user,
1450 date, extra)
1450 date, extra)
1451 if editor:
1451 if editor:
1452 ctx._text = editor(repo, ctx, [])
1452 ctx._text = editor(repo, ctx, [])
1453 return ctx
1453 return ctx
1454
1454
1455 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1455 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1456 similarity=0):
1456 similarity=0):
1457 """Apply <patchname> to the working directory.
1457 """Apply <patchname> to the working directory.
1458
1458
1459 'eolmode' specifies how end of lines should be handled. It can be:
1459 'eolmode' specifies how end of lines should be handled. It can be:
1460 - 'strict': inputs are read in binary mode, EOLs are preserved
1460 - 'strict': inputs are read in binary mode, EOLs are preserved
1461 - 'crlf': EOLs are ignored when patching and reset to CRLF
1461 - 'crlf': EOLs are ignored when patching and reset to CRLF
1462 - 'lf': EOLs are ignored when patching and reset to LF
1462 - 'lf': EOLs are ignored when patching and reset to LF
1463 - None: get it from user settings, default to 'strict'
1463 - None: get it from user settings, default to 'strict'
1464 'eolmode' is ignored when using an external patcher program.
1464 'eolmode' is ignored when using an external patcher program.
1465
1465
1466 Returns whether patch was applied with fuzz factor.
1466 Returns whether patch was applied with fuzz factor.
1467 """
1467 """
1468 patcher = ui.config('ui', 'patch')
1468 patcher = ui.config('ui', 'patch')
1469 if files is None:
1469 if files is None:
1470 files = set()
1470 files = set()
1471 try:
1471 try:
1472 if patcher:
1472 if patcher:
1473 return _externalpatch(ui, repo, patcher, patchname, strip,
1473 return _externalpatch(ui, repo, patcher, patchname, strip,
1474 files, similarity)
1474 files, similarity)
1475 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1475 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1476 similarity)
1476 similarity)
1477 except PatchError, err:
1477 except PatchError, err:
1478 raise util.Abort(str(err))
1478 raise util.Abort(str(err))
1479
1479
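# Illustrative sketch, not part of the original module: a typical call to
# patch() as documented above. 'ui' and 'repo' are the usual Mercurial
# objects and 'fix.diff' is a hypothetical patch file name.
def _example_patch(ui, repo):
    files = set()
    fuzz = patch(ui, repo, 'fix.diff', strip=1, files=files, eolmode='lf')
    if fuzz:
        ui.status('patch applied with fuzz\n')
    # 'files' now holds every path touched by the patch
    return files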
1480 def changedfiles(ui, repo, patchpath, strip=1):
1480 def changedfiles(ui, repo, patchpath, strip=1):
1481 backend = fsbackend(ui, repo.root)
1481 backend = fsbackend(ui, repo.root)
1482 fp = open(patchpath, 'rb')
1482 fp = open(patchpath, 'rb')
1483 try:
1483 try:
1484 changed = set()
1484 changed = set()
1485 for state, values in iterhunks(fp):
1485 for state, values in iterhunks(fp):
1486 if state == 'file':
1486 if state == 'file':
1487 afile, bfile, first_hunk, gp = values
1487 afile, bfile, first_hunk, gp = values
1488 if gp:
1488 if gp:
1489 gp.path = pathstrip(gp.path, strip - 1)[1]
1489 gp.path = pathstrip(gp.path, strip - 1)[1]
1490 if gp.oldpath:
1490 if gp.oldpath:
1491 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1491 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1492 else:
1492 else:
1493 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1493 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1494 changed.add(gp.path)
1494 changed.add(gp.path)
1495 if gp.op == 'RENAME':
1495 if gp.op == 'RENAME':
1496 changed.add(gp.oldpath)
1496 changed.add(gp.oldpath)
1497 elif state not in ('hunk', 'git'):
1497 elif state not in ('hunk', 'git'):
1498 raise util.Abort(_('unsupported parser state: %s') % state)
1498 raise util.Abort(_('unsupported parser state: %s') % state)
1499 return changed
1499 return changed
1500 finally:
1500 finally:
1501 fp.close()
1501 fp.close()
1502
1502
1503 class GitDiffRequired(Exception):
1503 class GitDiffRequired(Exception):
1504 pass
1504 pass
1505
1505
1506 def diffopts(ui, opts=None, untrusted=False, section='diff'):
1506 def diffopts(ui, opts=None, untrusted=False, section='diff'):
1507 def get(key, name=None, getter=ui.configbool):
1507 def get(key, name=None, getter=ui.configbool):
1508 return ((opts and opts.get(key)) or
1508 return ((opts and opts.get(key)) or
1509 getter(section, name or key, None, untrusted=untrusted))
1509 getter(section, name or key, None, untrusted=untrusted))
1510 return mdiff.diffopts(
1510 return mdiff.diffopts(
1511 text=opts and opts.get('text'),
1511 text=opts and opts.get('text'),
1512 git=get('git'),
1512 git=get('git'),
1513 nodates=get('nodates'),
1513 nodates=get('nodates'),
1514 showfunc=get('show_function', 'showfunc'),
1514 showfunc=get('show_function', 'showfunc'),
1515 ignorews=get('ignore_all_space', 'ignorews'),
1515 ignorews=get('ignore_all_space', 'ignorews'),
1516 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1516 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1517 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1517 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1518 context=get('unified', getter=ui.config))
1518 context=get('unified', getter=ui.config))
1519
1519
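# Illustrative sketch, not part of the original module: how diffopts() above
# merges command-line style options with the [diff] configuration section.
# The option keys are the ones looked up in the code above; the helper name
# is hypothetical.
def _example_diffopts(ui):
    # values given in the dict win; anything missing falls back to ui config
    return diffopts(ui, {'git': True, 'show_function': True})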
1520 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1520 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1521 losedatafn=None, prefix=''):
1521 losedatafn=None, prefix=''):
1522 '''yields diff of changes to files between two nodes, or node and
1522 '''yields diff of changes to files between two nodes, or node and
1523 working directory.
1523 working directory.
1524
1524
1525 if node1 is None, use first dirstate parent instead.
1525 if node1 is None, use first dirstate parent instead.
1526 if node2 is None, compare node1 with working directory.
1526 if node2 is None, compare node1 with working directory.
1527
1527
1528 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1528 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1529 every time some change cannot be represented with the current
1529 every time some change cannot be represented with the current
1530 patch format. Return False to upgrade to git patch format, True to
1530 patch format. Return False to upgrade to git patch format, True to
1531 accept the loss or raise an exception to abort the diff. It is
1531 accept the loss or raise an exception to abort the diff. It is
1532 called with the name of current file being diffed as 'fn'. If set
1532 called with the name of current file being diffed as 'fn'. If set
1533 to None, patches will always be upgraded to git format when
1533 to None, patches will always be upgraded to git format when
1534 necessary.
1534 necessary.
1535
1535
1536 prefix is a filename prefix that is prepended to all filenames on
1536 prefix is a filename prefix that is prepended to all filenames on
1537 display (used for subrepos).
1537 display (used for subrepos).
1538 '''
1538 '''
1539
1539
1540 if opts is None:
1540 if opts is None:
1541 opts = mdiff.defaultopts
1541 opts = mdiff.defaultopts
1542
1542
1543 if not node1 and not node2:
1543 if not node1 and not node2:
1544 node1 = repo.dirstate.p1()
1544 node1 = repo.dirstate.p1()
1545
1545
1546 def lrugetfilectx():
1546 def lrugetfilectx():
1547 cache = {}
1547 cache = {}
1548 order = util.deque()
1548 order = util.deque()
1549 def getfilectx(f, ctx):
1549 def getfilectx(f, ctx):
1550 fctx = ctx.filectx(f, filelog=cache.get(f))
1550 fctx = ctx.filectx(f, filelog=cache.get(f))
1551 if f not in cache:
1551 if f not in cache:
1552 if len(cache) > 20:
1552 if len(cache) > 20:
1553 del cache[order.popleft()]
1553 del cache[order.popleft()]
1554 cache[f] = fctx.filelog()
1554 cache[f] = fctx.filelog()
1555 else:
1555 else:
1556 order.remove(f)
1556 order.remove(f)
1557 order.append(f)
1557 order.append(f)
1558 return fctx
1558 return fctx
1559 return getfilectx
1559 return getfilectx
1560 getfilectx = lrugetfilectx()
1560 getfilectx = lrugetfilectx()
1561
1561
1562 ctx1 = repo[node1]
1562 ctx1 = repo[node1]
1563 ctx2 = repo[node2]
1563 ctx2 = repo[node2]
1564
1564
1565 if not changes:
1565 if not changes:
1566 changes = repo.status(ctx1, ctx2, match=match)
1566 changes = repo.status(ctx1, ctx2, match=match)
1567 modified, added, removed = changes[:3]
1567 modified, added, removed = changes[:3]
1568
1568
1569 if not modified and not added and not removed:
1569 if not modified and not added and not removed:
1570 return []
1570 return []
1571
1571
1572 revs = None
1572 revs = None
1573 hexfunc = repo.ui.debugflag and hex or short
1573 hexfunc = repo.ui.debugflag and hex or short
1574 revs = [hexfunc(node) for node in [node1, node2] if node]
1574 revs = [hexfunc(node) for node in [node1, node2] if node]
1575
1575
1576 copy = {}
1576 copy = {}
1577 if opts.git or opts.upgrade:
1577 if opts.git or opts.upgrade:
1578 copy = copies.pathcopies(ctx1, ctx2)
1578 copy = copies.pathcopies(ctx1, ctx2)
1579
1579
1580 def difffn(opts, losedata):
1580 def difffn(opts, losedata):
1581 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1581 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1582 copy, getfilectx, opts, losedata, prefix)
1582 copy, getfilectx, opts, losedata, prefix)
1583 if opts.upgrade and not opts.git:
1583 if opts.upgrade and not opts.git:
1584 try:
1584 try:
1585 def losedata(fn):
1585 def losedata(fn):
1586 if not losedatafn or not losedatafn(fn=fn):
1586 if not losedatafn or not losedatafn(fn=fn):
1587 raise GitDiffRequired
1587 raise GitDiffRequired
1588 # Buffer the whole output until we are sure it can be generated
1588 # Buffer the whole output until we are sure it can be generated
1589 return list(difffn(opts.copy(git=False), losedata))
1589 return list(difffn(opts.copy(git=False), losedata))
1590 except GitDiffRequired:
1590 except GitDiffRequired:
1591 return difffn(opts.copy(git=True), None)
1591 return difffn(opts.copy(git=True), None)
1592 else:
1592 else:
1593 return difffn(opts, None)
1593 return difffn(opts, None)
1594
1594
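# Illustrative sketch, not part of the original module: the usual way the
# generator returned by diff() above is consumed. The chunks are plain text
# and can simply be concatenated or written out; the helper name is made up.
def _example_diff(ui, repo, node1=None, node2=None):
    opts = diffopts(ui, {'git': True})
    return ''.join(diff(repo, node1, node2, opts=opts))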
1595 def difflabel(func, *args, **kw):
1595 def difflabel(func, *args, **kw):
1596 '''yields 2-tuples of (output, label) based on the output of func()'''
1596 '''yields 2-tuples of (output, label) based on the output of func()'''
1597 headprefixes = [('diff', 'diff.diffline'),
1597 headprefixes = [('diff', 'diff.diffline'),
1598 ('copy', 'diff.extended'),
1598 ('copy', 'diff.extended'),
1599 ('rename', 'diff.extended'),
1599 ('rename', 'diff.extended'),
1600 ('old', 'diff.extended'),
1600 ('old', 'diff.extended'),
1601 ('new', 'diff.extended'),
1601 ('new', 'diff.extended'),
1602 ('deleted', 'diff.extended'),
1602 ('deleted', 'diff.extended'),
1603 ('---', 'diff.file_a'),
1603 ('---', 'diff.file_a'),
1604 ('+++', 'diff.file_b')]
1604 ('+++', 'diff.file_b')]
1605 textprefixes = [('@', 'diff.hunk'),
1605 textprefixes = [('@', 'diff.hunk'),
1606 ('-', 'diff.deleted'),
1606 ('-', 'diff.deleted'),
1607 ('+', 'diff.inserted')]
1607 ('+', 'diff.inserted')]
1608 head = False
1608 head = False
1609 for chunk in func(*args, **kw):
1609 for chunk in func(*args, **kw):
1610 lines = chunk.split('\n')
1610 lines = chunk.split('\n')
1611 for i, line in enumerate(lines):
1611 for i, line in enumerate(lines):
1612 if i != 0:
1612 if i != 0:
1613 yield ('\n', '')
1613 yield ('\n', '')
1614 if head:
1614 if head:
1615 if line.startswith('@'):
1615 if line.startswith('@'):
1616 head = False
1616 head = False
1617 else:
1617 else:
1618 if line and line[0] not in ' +-@\\':
1618 if line and line[0] not in ' +-@\\':
1619 head = True
1619 head = True
1620 stripline = line
1620 stripline = line
1621 if not head and line and line[0] in '+-':
1621 if not head and line and line[0] in '+-':
1622 # highlight trailing whitespace, but only in changed lines
1622 # highlight trailing whitespace, but only in changed lines
1623 stripline = line.rstrip()
1623 stripline = line.rstrip()
1624 prefixes = textprefixes
1624 prefixes = textprefixes
1625 if head:
1625 if head:
1626 prefixes = headprefixes
1626 prefixes = headprefixes
1627 for prefix, label in prefixes:
1627 for prefix, label in prefixes:
1628 if stripline.startswith(prefix):
1628 if stripline.startswith(prefix):
1629 yield (stripline, label)
1629 yield (stripline, label)
1630 break
1630 break
1631 else:
1631 else:
1632 yield (line, '')
1632 yield (line, '')
1633 if line != stripline:
1633 if line != stripline:
1634 yield (line[len(stripline):], 'diff.trailingwhitespace')
1634 yield (line[len(stripline):], 'diff.trailingwhitespace')
1635
1635
1636 def diffui(*args, **kw):
1636 def diffui(*args, **kw):
1637 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1637 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1638 return difflabel(diff, *args, **kw)
1638 return difflabel(diff, *args, **kw)
1639
1639
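# Illustrative sketch, not part of the original module: how the
# (output, label) pairs produced by difflabel()/diffui() above are normally
# fed to ui.write() so that colour labels are applied.
def _example_diffui(ui, repo, node1=None, node2=None):
    for chunk, label in diffui(repo, node1, node2,
                               opts=diffopts(ui, {'git': True})):
        ui.write(chunk, label=label)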
1640 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1640 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1641 copy, getfilectx, opts, losedatafn, prefix):
1641 copy, getfilectx, opts, losedatafn, prefix):
1642
1642
1643 def join(f):
1643 def join(f):
1644 return posixpath.join(prefix, f)
1644 return posixpath.join(prefix, f)
1645
1645
1646 def addmodehdr(header, omode, nmode):
1646 def addmodehdr(header, omode, nmode):
1647 if omode != nmode:
1647 if omode != nmode:
1648 header.append('old mode %s\n' % omode)
1648 header.append('old mode %s\n' % omode)
1649 header.append('new mode %s\n' % nmode)
1649 header.append('new mode %s\n' % nmode)
1650
1650
1651 def addindexmeta(meta, revs):
1651 def addindexmeta(meta, revs):
1652 if opts.git:
1652 if opts.git:
1653 i = len(revs)
1653 i = len(revs)
1654 if i==2:
1654 if i==2:
1655 meta.append('index %s..%s\n' % tuple(revs))
1655 meta.append('index %s..%s\n' % tuple(revs))
1656 elif i==3:
1656 elif i==3:
1657 meta.append('index %s,%s..%s\n' % tuple(revs))
1657 meta.append('index %s,%s..%s\n' % tuple(revs))
1658
1658
1659 def gitindex(text):
1659 def gitindex(text):
1660 if not text:
1660 if not text:
1661 return hex(nullid)
1661 return hex(nullid)
1662 l = len(text)
1662 l = len(text)
1663 s = util.sha1('blob %d\0' % l)
1663 s = util.sha1('blob %d\0' % l)
1664 s.update(text)
1664 s.update(text)
1665 return s.hexdigest()
1665 return s.hexdigest()
1666
1666
1667 def diffline(a, b, revs):
1667 def diffline(a, b, revs):
1668 if opts.git:
1668 if opts.git:
1669 line = 'diff --git a/%s b/%s\n' % (a, b)
1669 line = 'diff --git a/%s b/%s\n' % (a, b)
1670 elif not repo.ui.quiet:
1670 elif not repo.ui.quiet:
1671 if revs:
1671 if revs:
1672 revinfo = ' '.join(["-r %s" % rev for rev in revs])
1672 revinfo = ' '.join(["-r %s" % rev for rev in revs])
1673 line = 'diff %s %s\n' % (revinfo, a)
1673 line = 'diff %s %s\n' % (revinfo, a)
1674 else:
1674 else:
1675 line = 'diff %s\n' % a
1675 line = 'diff %s\n' % a
1676 else:
1676 else:
1677 line = ''
1677 line = ''
1678 return line
1678 return line
1679
1679
1680 date1 = util.datestr(ctx1.date())
1680 date1 = util.datestr(ctx1.date())
1681 man1 = ctx1.manifest()
1681 man1 = ctx1.manifest()
1682
1682
1683 gone = set()
1683 gone = set()
1684 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1684 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1685
1685
1686 copyto = dict([(v, k) for k, v in copy.items()])
1686 copyto = dict([(v, k) for k, v in copy.items()])
1687
1687
1688 if opts.git:
1688 if opts.git:
1689 revs = None
1689 revs = None
1690
1690
1691 for f in sorted(modified + added + removed):
1691 for f in sorted(modified + added + removed):
1692 to = None
1692 to = None
1693 tn = None
1693 tn = None
1694 dodiff = True
1694 dodiff = True
1695 header = []
1695 header = []
1696 if f in man1:
1696 if f in man1:
1697 to = getfilectx(f, ctx1).data()
1697 to = getfilectx(f, ctx1).data()
1698 if f not in removed:
1698 if f not in removed:
1699 tn = getfilectx(f, ctx2).data()
1699 tn = getfilectx(f, ctx2).data()
1700 a, b = f, f
1700 a, b = f, f
1701 if opts.git or losedatafn:
1701 if opts.git or losedatafn:
1702 if f in added:
1702 if f in added:
1703 mode = gitmode[ctx2.flags(f)]
1703 mode = gitmode[ctx2.flags(f)]
1704 if f in copy or f in copyto:
1704 if f in copy or f in copyto:
1705 if opts.git:
1705 if opts.git:
1706 if f in copy:
1706 if f in copy:
1707 a = copy[f]
1707 a = copy[f]
1708 else:
1708 else:
1709 a = copyto[f]
1709 a = copyto[f]
1710 omode = gitmode[man1.flags(a)]
1710 omode = gitmode[man1.flags(a)]
1711 addmodehdr(header, omode, mode)
1711 addmodehdr(header, omode, mode)
1712 if a in removed and a not in gone:
1712 if a in removed and a not in gone:
1713 op = 'rename'
1713 op = 'rename'
1714 gone.add(a)
1714 gone.add(a)
1715 else:
1715 else:
1716 op = 'copy'
1716 op = 'copy'
1717 header.append('%s from %s\n' % (op, join(a)))
1717 header.append('%s from %s\n' % (op, join(a)))
1718 header.append('%s to %s\n' % (op, join(f)))
1718 header.append('%s to %s\n' % (op, join(f)))
1719 to = getfilectx(a, ctx1).data()
1719 to = getfilectx(a, ctx1).data()
1720 else:
1720 else:
1721 losedatafn(f)
1721 losedatafn(f)
1722 else:
1722 else:
1723 if opts.git:
1723 if opts.git:
1724 header.append('new file mode %s\n' % mode)
1724 header.append('new file mode %s\n' % mode)
1725 elif ctx2.flags(f):
1725 elif ctx2.flags(f):
1726 losedatafn(f)
1726 losedatafn(f)
1727 # In theory, if tn was copied or renamed we should check
1727 # In theory, if tn was copied or renamed we should check
1728 # if the source is binary too but the copy record already
1728 # if the source is binary too but the copy record already
1729 # forces git mode.
1729 # forces git mode.
1730 if util.binary(tn):
1730 if util.binary(tn):
1731 if opts.git:
1731 if opts.git:
1732 dodiff = 'binary'
1732 dodiff = 'binary'
1733 else:
1733 else:
1734 losedatafn(f)
1734 losedatafn(f)
1735 if not opts.git and not tn:
1735 if not opts.git and not tn:
1736 # regular diffs cannot represent new empty file
1736 # regular diffs cannot represent new empty file
1737 losedatafn(f)
1737 losedatafn(f)
1738 elif f in removed:
1738 elif f in removed:
1739 if opts.git:
1739 if opts.git:
1740 # have we already reported a copy above?
1740 # have we already reported a copy above?
1741 if ((f in copy and copy[f] in added
1741 if ((f in copy and copy[f] in added
1742 and copyto[copy[f]] == f) or
1742 and copyto[copy[f]] == f) or
1743 (f in copyto and copyto[f] in added
1743 (f in copyto and copyto[f] in added
1744 and copy[copyto[f]] == f)):
1744 and copy[copyto[f]] == f)):
1745 dodiff = False
1745 dodiff = False
1746 else:
1746 else:
1747 header.append('deleted file mode %s\n' %
1747 header.append('deleted file mode %s\n' %
1748 gitmode[man1.flags(f)])
1748 gitmode[man1.flags(f)])
1749 if util.binary(to):
1749 if util.binary(to):
1750 dodiff = 'binary'
1750 dodiff = 'binary'
1751 elif not to or util.binary(to):
1751 elif not to or util.binary(to):
1752 # regular diffs cannot represent empty file deletion
1752 # regular diffs cannot represent empty file deletion
1753 losedatafn(f)
1753 losedatafn(f)
1754 else:
1754 else:
1755 oflag = man1.flags(f)
1755 oflag = man1.flags(f)
1756 nflag = ctx2.flags(f)
1756 nflag = ctx2.flags(f)
1757 binary = util.binary(to) or util.binary(tn)
1757 binary = util.binary(to) or util.binary(tn)
1758 if opts.git:
1758 if opts.git:
1759 addmodehdr(header, gitmode[oflag], gitmode[nflag])
1759 addmodehdr(header, gitmode[oflag], gitmode[nflag])
1760 if binary:
1760 if binary:
1761 dodiff = 'binary'
1761 dodiff = 'binary'
1762 elif binary or nflag != oflag:
1762 elif binary or nflag != oflag:
1763 losedatafn(f)
1763 losedatafn(f)
1764
1764
1765 if dodiff:
1765 if dodiff:
1766 if opts.git or revs:
1766 if opts.git or revs:
1767 header.insert(0, diffline(join(a), join(b), revs))
1767 header.insert(0, diffline(join(a), join(b), revs))
1768 if dodiff == 'binary':
1768 if dodiff == 'binary':
1769 text = mdiff.b85diff(to, tn)
1769 text = mdiff.b85diff(to, tn)
1770 if text:
1770 if text:
1771 addindexmeta(header, [gitindex(to), gitindex(tn)])
1771 addindexmeta(header, [gitindex(to), gitindex(tn)])
1772 else:
1772 else:
1773 text = mdiff.unidiff(to, date1,
1773 text = mdiff.unidiff(to, date1,
1774 # ctx2 date may be dynamic
1774 # ctx2 date may be dynamic
1775 tn, util.datestr(ctx2.date()),
1775 tn, util.datestr(ctx2.date()),
1776 join(a), join(b), opts=opts)
1776 join(a), join(b), opts=opts)
1777 if header and (text or len(header) > 1):
1777 if header and (text or len(header) > 1):
1778 yield ''.join(header)
1778 yield ''.join(header)
1779 if text:
1779 if text:
1780 yield text
1780 yield text
1781
1781
1782 def diffstatsum(stats):
1782 def diffstatsum(stats):
1783 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1783 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1784 for f, a, r, b in stats:
1784 for f, a, r, b in stats:
1785 maxfile = max(maxfile, encoding.colwidth(f))
1785 maxfile = max(maxfile, encoding.colwidth(f))
1786 maxtotal = max(maxtotal, a + r)
1786 maxtotal = max(maxtotal, a + r)
1787 addtotal += a
1787 addtotal += a
1788 removetotal += r
1788 removetotal += r
1789 binary = binary or b
1789 binary = binary or b
1790
1790
1791 return maxfile, maxtotal, addtotal, removetotal, binary
1791 return maxfile, maxtotal, addtotal, removetotal, binary
1792
1792
1793 def diffstatdata(lines):
1793 def diffstatdata(lines):
1794 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1794 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1795
1795
1796 results = []
1796 results = []
1797 filename, adds, removes, isbinary = None, 0, 0, False
1797 filename, adds, removes, isbinary = None, 0, 0, False
1798
1798
1799 def addresult():
1799 def addresult():
1800 if filename:
1800 if filename:
1801 results.append((filename, adds, removes, isbinary))
1801 results.append((filename, adds, removes, isbinary))
1802
1802
1803 for line in lines:
1803 for line in lines:
1804 if line.startswith('diff'):
1804 if line.startswith('diff'):
1805 addresult()
1805 addresult()
1806 # set numbers to 0 anyway when starting new file
1806 # set numbers to 0 anyway when starting new file
1807 adds, removes, isbinary = 0, 0, False
1807 adds, removes, isbinary = 0, 0, False
1808 if line.startswith('diff --git a/'):
1808 if line.startswith('diff --git a/'):
1809 filename = gitre.search(line).group(1)
1809 filename = gitre.search(line).group(1)
1810 elif line.startswith('diff -r'):
1810 elif line.startswith('diff -r'):
1811 # format: "diff -r ... -r ... filename"
1811 # format: "diff -r ... -r ... filename"
1812 filename = diffre.search(line).group(1)
1812 filename = diffre.search(line).group(1)
1813 elif line.startswith('+') and not line.startswith('+++ '):
1813 elif line.startswith('+') and not line.startswith('+++ '):
1814 adds += 1
1814 adds += 1
1815 elif line.startswith('-') and not line.startswith('--- '):
1815 elif line.startswith('-') and not line.startswith('--- '):
1816 removes += 1
1816 removes += 1
1817 elif (line.startswith('GIT binary patch') or
1817 elif (line.startswith('GIT binary patch') or
1818 line.startswith('Binary file')):
1818 line.startswith('Binary file')):
1819 isbinary = True
1819 isbinary = True
1820 addresult()
1820 addresult()
1821 return results
1821 return results
1822
1822
1823 def diffstat(lines, width=80, git=False):
1823 def diffstat(lines, width=80, git=False):
1824 output = []
1824 output = []
1825 stats = diffstatdata(lines)
1825 stats = diffstatdata(lines)
1826 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1826 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1827
1827
1828 countwidth = len(str(maxtotal))
1828 countwidth = len(str(maxtotal))
1829 if hasbinary and countwidth < 3:
1829 if hasbinary and countwidth < 3:
1830 countwidth = 3
1830 countwidth = 3
1831 graphwidth = width - countwidth - maxname - 6
1831 graphwidth = width - countwidth - maxname - 6
1832 if graphwidth < 10:
1832 if graphwidth < 10:
1833 graphwidth = 10
1833 graphwidth = 10
1834
1834
1835 def scale(i):
1835 def scale(i):
1836 if maxtotal <= graphwidth:
1836 if maxtotal <= graphwidth:
1837 return i
1837 return i
1838 # If diffstat runs out of room it doesn't print anything,
1838 # If diffstat runs out of room it doesn't print anything,
1839 # which isn't very useful, so always print at least one + or -
1839 # which isn't very useful, so always print at least one + or -
1840 # if there were at least some changes.
1840 # if there were at least some changes.
1841 return max(i * graphwidth // maxtotal, int(bool(i)))
1841 return max(i * graphwidth // maxtotal, int(bool(i)))
1842
1842
1843 for filename, adds, removes, isbinary in stats:
1843 for filename, adds, removes, isbinary in stats:
1844 if isbinary:
1844 if isbinary:
1845 count = 'Bin'
1845 count = 'Bin'
1846 else:
1846 else:
1847 count = adds + removes
1847 count = adds + removes
1848 pluses = '+' * scale(adds)
1848 pluses = '+' * scale(adds)
1849 minuses = '-' * scale(removes)
1849 minuses = '-' * scale(removes)
1850 output.append(' %s%s | %*s %s%s\n' %
1850 output.append(' %s%s | %*s %s%s\n' %
1851 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1851 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1852 countwidth, count, pluses, minuses))
1852 countwidth, count, pluses, minuses))
1853
1853
1854 if stats:
1854 if stats:
1855 output.append(_(' %d files changed, %d insertions(+), '
1855 output.append(_(' %d files changed, %d insertions(+), '
1856 '%d deletions(-)\n')
1856 '%d deletions(-)\n')
1857 % (len(stats), totaladds, totalremoves))
1857 % (len(stats), totaladds, totalremoves))
1858
1858
1859 return ''.join(output)
1859 return ''.join(output)
1860
1860
1861 def diffstatui(*args, **kw):
1861 def diffstatui(*args, **kw):
1862 '''like diffstat(), but yields 2-tuples of (output, label) for
1862 '''like diffstat(), but yields 2-tuples of (output, label) for
1863 ui.write()
1863 ui.write()
1864 '''
1864 '''
1865
1865
1866 for line in diffstat(*args, **kw).splitlines():
1866 for line in diffstat(*args, **kw).splitlines():
1867 if line and line[-1] in '+-':
1867 if line and line[-1] in '+-':
1868 name, graph = line.rsplit(' ', 1)
1868 name, graph = line.rsplit(' ', 1)
1869 yield (name + ' ', '')
1869 yield (name + ' ', '')
1870 m = re.search(r'\++', graph)
1870 m = re.search(r'\++', graph)
1871 if m:
1871 if m:
1872 yield (m.group(0), 'diffstat.inserted')
1872 yield (m.group(0), 'diffstat.inserted')
1873 m = re.search(r'-+', graph)
1873 m = re.search(r'-+', graph)
1874 if m:
1874 if m:
1875 yield (m.group(0), 'diffstat.deleted')
1875 yield (m.group(0), 'diffstat.deleted')
1876 else:
1876 else:
1877 yield (line, '')
1877 yield (line, '')
1878 yield ('\n', '')
1878 yield ('\n', '')
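# Illustrative sketch, not part of the original module: tying together the
# diffstat helpers above. diff() produces the text, diffstatdata()/
# diffstatsum() aggregate it, and diffstat()/diffstatui() render it. The
# helper name and the sample output in the comment are illustrative only.
def _example_diffstat(ui, repo, node1=None, node2=None):
    opts = diffopts(ui, {'git': True})
    lines = ''.join(diff(repo, node1, node2, opts=opts)).splitlines(True)
    # plain text summary, e.g. ' foo.c |  4 ++--' plus a totals line
    ui.write(diffstat(lines, width=78, git=True))
    # or the labelled variant, suitable for coloured output
    for output, label in diffstatui(lines, width=78, git=True):
        ui.write(output, label=label)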