##// END OF EJS Templates
diff: enhance highlighting with color (issue3034)...
Kirill Elagin -
r15201:2c4fdee4 default
parent child Browse files
Show More
@@ -1,1859 +1,1868 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email.Parser, os, errno, re
9 import cStringIO, email.Parser, os, errno, re
10 import tempfile, zlib, shutil
10 import tempfile, zlib, shutil
11
11
12 from i18n import _
12 from i18n import _
13 from node import hex, nullid, short
13 from node import hex, nullid, short
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
15 import context
15 import context
16
16
17 gitre = re.compile('diff --git a/(.*) b/(.*)')
17 gitre = re.compile('diff --git a/(.*) b/(.*)')
18
18
19 class PatchError(Exception):
19 class PatchError(Exception):
20 pass
20 pass
21
21
22
22
23 # public functions
23 # public functions
24
24
25 def split(stream):
25 def split(stream):
26 '''return an iterator of individual patches from a stream'''
26 '''return an iterator of individual patches from a stream'''
27 def isheader(line, inheader):
27 def isheader(line, inheader):
28 if inheader and line[0] in (' ', '\t'):
28 if inheader and line[0] in (' ', '\t'):
29 # continuation
29 # continuation
30 return True
30 return True
31 if line[0] in (' ', '-', '+'):
31 if line[0] in (' ', '-', '+'):
32 # diff line - don't check for header pattern in there
32 # diff line - don't check for header pattern in there
33 return False
33 return False
34 l = line.split(': ', 1)
34 l = line.split(': ', 1)
35 return len(l) == 2 and ' ' not in l[0]
35 return len(l) == 2 and ' ' not in l[0]
36
36
37 def chunk(lines):
37 def chunk(lines):
38 return cStringIO.StringIO(''.join(lines))
38 return cStringIO.StringIO(''.join(lines))
39
39
40 def hgsplit(stream, cur):
40 def hgsplit(stream, cur):
41 inheader = True
41 inheader = True
42
42
43 for line in stream:
43 for line in stream:
44 if not line.strip():
44 if not line.strip():
45 inheader = False
45 inheader = False
46 if not inheader and line.startswith('# HG changeset patch'):
46 if not inheader and line.startswith('# HG changeset patch'):
47 yield chunk(cur)
47 yield chunk(cur)
48 cur = []
48 cur = []
49 inheader = True
49 inheader = True
50
50
51 cur.append(line)
51 cur.append(line)
52
52
53 if cur:
53 if cur:
54 yield chunk(cur)
54 yield chunk(cur)
55
55
56 def mboxsplit(stream, cur):
56 def mboxsplit(stream, cur):
57 for line in stream:
57 for line in stream:
58 if line.startswith('From '):
58 if line.startswith('From '):
59 for c in split(chunk(cur[1:])):
59 for c in split(chunk(cur[1:])):
60 yield c
60 yield c
61 cur = []
61 cur = []
62
62
63 cur.append(line)
63 cur.append(line)
64
64
65 if cur:
65 if cur:
66 for c in split(chunk(cur[1:])):
66 for c in split(chunk(cur[1:])):
67 yield c
67 yield c
68
68
69 def mimesplit(stream, cur):
69 def mimesplit(stream, cur):
70 def msgfp(m):
70 def msgfp(m):
71 fp = cStringIO.StringIO()
71 fp = cStringIO.StringIO()
72 g = email.Generator.Generator(fp, mangle_from_=False)
72 g = email.Generator.Generator(fp, mangle_from_=False)
73 g.flatten(m)
73 g.flatten(m)
74 fp.seek(0)
74 fp.seek(0)
75 return fp
75 return fp
76
76
77 for line in stream:
77 for line in stream:
78 cur.append(line)
78 cur.append(line)
79 c = chunk(cur)
79 c = chunk(cur)
80
80
81 m = email.Parser.Parser().parse(c)
81 m = email.Parser.Parser().parse(c)
82 if not m.is_multipart():
82 if not m.is_multipart():
83 yield msgfp(m)
83 yield msgfp(m)
84 else:
84 else:
85 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
85 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
86 for part in m.walk():
86 for part in m.walk():
87 ct = part.get_content_type()
87 ct = part.get_content_type()
88 if ct not in ok_types:
88 if ct not in ok_types:
89 continue
89 continue
90 yield msgfp(part)
90 yield msgfp(part)
91
91
92 def headersplit(stream, cur):
92 def headersplit(stream, cur):
93 inheader = False
93 inheader = False
94
94
95 for line in stream:
95 for line in stream:
96 if not inheader and isheader(line, inheader):
96 if not inheader and isheader(line, inheader):
97 yield chunk(cur)
97 yield chunk(cur)
98 cur = []
98 cur = []
99 inheader = True
99 inheader = True
100 if inheader and not isheader(line, inheader):
100 if inheader and not isheader(line, inheader):
101 inheader = False
101 inheader = False
102
102
103 cur.append(line)
103 cur.append(line)
104
104
105 if cur:
105 if cur:
106 yield chunk(cur)
106 yield chunk(cur)
107
107
108 def remainder(cur):
108 def remainder(cur):
109 yield chunk(cur)
109 yield chunk(cur)
110
110
111 class fiter(object):
111 class fiter(object):
112 def __init__(self, fp):
112 def __init__(self, fp):
113 self.fp = fp
113 self.fp = fp
114
114
115 def __iter__(self):
115 def __iter__(self):
116 return self
116 return self
117
117
118 def next(self):
118 def next(self):
119 l = self.fp.readline()
119 l = self.fp.readline()
120 if not l:
120 if not l:
121 raise StopIteration
121 raise StopIteration
122 return l
122 return l
123
123
124 inheader = False
124 inheader = False
125 cur = []
125 cur = []
126
126
127 mimeheaders = ['content-type']
127 mimeheaders = ['content-type']
128
128
129 if not util.safehasattr(stream, 'next'):
129 if not util.safehasattr(stream, 'next'):
130 # http responses, for example, have readline but not next
130 # http responses, for example, have readline but not next
131 stream = fiter(stream)
131 stream = fiter(stream)
132
132
133 for line in stream:
133 for line in stream:
134 cur.append(line)
134 cur.append(line)
135 if line.startswith('# HG changeset patch'):
135 if line.startswith('# HG changeset patch'):
136 return hgsplit(stream, cur)
136 return hgsplit(stream, cur)
137 elif line.startswith('From '):
137 elif line.startswith('From '):
138 return mboxsplit(stream, cur)
138 return mboxsplit(stream, cur)
139 elif isheader(line, inheader):
139 elif isheader(line, inheader):
140 inheader = True
140 inheader = True
141 if line.split(':', 1)[0].lower() in mimeheaders:
141 if line.split(':', 1)[0].lower() in mimeheaders:
142 # let email parser handle this
142 # let email parser handle this
143 return mimesplit(stream, cur)
143 return mimesplit(stream, cur)
144 elif line.startswith('--- ') and inheader:
144 elif line.startswith('--- ') and inheader:
145 # No evil headers seen by diff start, split by hand
145 # No evil headers seen by diff start, split by hand
146 return headersplit(stream, cur)
146 return headersplit(stream, cur)
147 # Not enough info, keep reading
147 # Not enough info, keep reading
148
148
149 # if we are here, we have a very plain patch
149 # if we are here, we have a very plain patch
150 return remainder(cur)
150 return remainder(cur)
151
151
152 def extract(ui, fileobj):
152 def extract(ui, fileobj):
153 '''extract patch from data read from fileobj.
153 '''extract patch from data read from fileobj.
154
154
155 patch can be a normal patch or contained in an email message.
155 patch can be a normal patch or contained in an email message.
156
156
157 return tuple (filename, message, user, date, branch, node, p1, p2).
157 return tuple (filename, message, user, date, branch, node, p1, p2).
158 Any item in the returned tuple can be None. If filename is None,
158 Any item in the returned tuple can be None. If filename is None,
159 fileobj did not contain a patch. Caller must unlink filename when done.'''
159 fileobj did not contain a patch. Caller must unlink filename when done.'''
160
160
161 # attempt to detect the start of a patch
161 # attempt to detect the start of a patch
162 # (this heuristic is borrowed from quilt)
162 # (this heuristic is borrowed from quilt)
163 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
163 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
164 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
164 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
165 r'---[ \t].*?^\+\+\+[ \t]|'
165 r'---[ \t].*?^\+\+\+[ \t]|'
166 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
166 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
167
167
168 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
168 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
169 tmpfp = os.fdopen(fd, 'w')
169 tmpfp = os.fdopen(fd, 'w')
170 try:
170 try:
171 msg = email.Parser.Parser().parse(fileobj)
171 msg = email.Parser.Parser().parse(fileobj)
172
172
173 subject = msg['Subject']
173 subject = msg['Subject']
174 user = msg['From']
174 user = msg['From']
175 if not subject and not user:
175 if not subject and not user:
176 # Not an email, restore parsed headers if any
176 # Not an email, restore parsed headers if any
177 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
177 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
178
178
179 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
179 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
180 # should try to parse msg['Date']
180 # should try to parse msg['Date']
181 date = None
181 date = None
182 nodeid = None
182 nodeid = None
183 branch = None
183 branch = None
184 parents = []
184 parents = []
185
185
186 if subject:
186 if subject:
187 if subject.startswith('[PATCH'):
187 if subject.startswith('[PATCH'):
188 pend = subject.find(']')
188 pend = subject.find(']')
189 if pend >= 0:
189 if pend >= 0:
190 subject = subject[pend + 1:].lstrip()
190 subject = subject[pend + 1:].lstrip()
191 subject = re.sub(r'\n[ \t]+', ' ', subject)
191 subject = re.sub(r'\n[ \t]+', ' ', subject)
192 ui.debug('Subject: %s\n' % subject)
192 ui.debug('Subject: %s\n' % subject)
193 if user:
193 if user:
194 ui.debug('From: %s\n' % user)
194 ui.debug('From: %s\n' % user)
195 diffs_seen = 0
195 diffs_seen = 0
196 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
196 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
197 message = ''
197 message = ''
198 for part in msg.walk():
198 for part in msg.walk():
199 content_type = part.get_content_type()
199 content_type = part.get_content_type()
200 ui.debug('Content-Type: %s\n' % content_type)
200 ui.debug('Content-Type: %s\n' % content_type)
201 if content_type not in ok_types:
201 if content_type not in ok_types:
202 continue
202 continue
203 payload = part.get_payload(decode=True)
203 payload = part.get_payload(decode=True)
204 m = diffre.search(payload)
204 m = diffre.search(payload)
205 if m:
205 if m:
206 hgpatch = False
206 hgpatch = False
207 hgpatchheader = False
207 hgpatchheader = False
208 ignoretext = False
208 ignoretext = False
209
209
210 ui.debug('found patch at byte %d\n' % m.start(0))
210 ui.debug('found patch at byte %d\n' % m.start(0))
211 diffs_seen += 1
211 diffs_seen += 1
212 cfp = cStringIO.StringIO()
212 cfp = cStringIO.StringIO()
213 for line in payload[:m.start(0)].splitlines():
213 for line in payload[:m.start(0)].splitlines():
214 if line.startswith('# HG changeset patch') and not hgpatch:
214 if line.startswith('# HG changeset patch') and not hgpatch:
215 ui.debug('patch generated by hg export\n')
215 ui.debug('patch generated by hg export\n')
216 hgpatch = True
216 hgpatch = True
217 hgpatchheader = True
217 hgpatchheader = True
218 # drop earlier commit message content
218 # drop earlier commit message content
219 cfp.seek(0)
219 cfp.seek(0)
220 cfp.truncate()
220 cfp.truncate()
221 subject = None
221 subject = None
222 elif hgpatchheader:
222 elif hgpatchheader:
223 if line.startswith('# User '):
223 if line.startswith('# User '):
224 user = line[7:]
224 user = line[7:]
225 ui.debug('From: %s\n' % user)
225 ui.debug('From: %s\n' % user)
226 elif line.startswith("# Date "):
226 elif line.startswith("# Date "):
227 date = line[7:]
227 date = line[7:]
228 elif line.startswith("# Branch "):
228 elif line.startswith("# Branch "):
229 branch = line[9:]
229 branch = line[9:]
230 elif line.startswith("# Node ID "):
230 elif line.startswith("# Node ID "):
231 nodeid = line[10:]
231 nodeid = line[10:]
232 elif line.startswith("# Parent "):
232 elif line.startswith("# Parent "):
233 parents.append(line[10:])
233 parents.append(line[10:])
234 elif not line.startswith("# "):
234 elif not line.startswith("# "):
235 hgpatchheader = False
235 hgpatchheader = False
236 elif line == '---' and gitsendmail:
236 elif line == '---' and gitsendmail:
237 ignoretext = True
237 ignoretext = True
238 if not hgpatchheader and not ignoretext:
238 if not hgpatchheader and not ignoretext:
239 cfp.write(line)
239 cfp.write(line)
240 cfp.write('\n')
240 cfp.write('\n')
241 message = cfp.getvalue()
241 message = cfp.getvalue()
242 if tmpfp:
242 if tmpfp:
243 tmpfp.write(payload)
243 tmpfp.write(payload)
244 if not payload.endswith('\n'):
244 if not payload.endswith('\n'):
245 tmpfp.write('\n')
245 tmpfp.write('\n')
246 elif not diffs_seen and message and content_type == 'text/plain':
246 elif not diffs_seen and message and content_type == 'text/plain':
247 message += '\n' + payload
247 message += '\n' + payload
248 except:
248 except:
249 tmpfp.close()
249 tmpfp.close()
250 os.unlink(tmpname)
250 os.unlink(tmpname)
251 raise
251 raise
252
252
253 if subject and not message.startswith(subject):
253 if subject and not message.startswith(subject):
254 message = '%s\n%s' % (subject, message)
254 message = '%s\n%s' % (subject, message)
255 tmpfp.close()
255 tmpfp.close()
256 if not diffs_seen:
256 if not diffs_seen:
257 os.unlink(tmpname)
257 os.unlink(tmpname)
258 return None, message, user, date, branch, None, None, None
258 return None, message, user, date, branch, None, None, None
259 p1 = parents and parents.pop(0) or None
259 p1 = parents and parents.pop(0) or None
260 p2 = parents and parents.pop(0) or None
260 p2 = parents and parents.pop(0) or None
261 return tmpname, message, user, date, branch, nodeid, p1, p2
261 return tmpname, message, user, date, branch, nodeid, p1, p2
262
262
263 class patchmeta(object):
263 class patchmeta(object):
264 """Patched file metadata
264 """Patched file metadata
265
265
266 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
266 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
267 or COPY. 'path' is patched file path. 'oldpath' is set to the
267 or COPY. 'path' is patched file path. 'oldpath' is set to the
268 origin file when 'op' is either COPY or RENAME, None otherwise. If
268 origin file when 'op' is either COPY or RENAME, None otherwise. If
269 file mode is changed, 'mode' is a tuple (islink, isexec) where
269 file mode is changed, 'mode' is a tuple (islink, isexec) where
270 'islink' is True if the file is a symlink and 'isexec' is True if
270 'islink' is True if the file is a symlink and 'isexec' is True if
271 the file is executable. Otherwise, 'mode' is None.
271 the file is executable. Otherwise, 'mode' is None.
272 """
272 """
273 def __init__(self, path):
273 def __init__(self, path):
274 self.path = path
274 self.path = path
275 self.oldpath = None
275 self.oldpath = None
276 self.mode = None
276 self.mode = None
277 self.op = 'MODIFY'
277 self.op = 'MODIFY'
278 self.binary = False
278 self.binary = False
279
279
280 def setmode(self, mode):
280 def setmode(self, mode):
281 islink = mode & 020000
281 islink = mode & 020000
282 isexec = mode & 0100
282 isexec = mode & 0100
283 self.mode = (islink, isexec)
283 self.mode = (islink, isexec)
284
284
285 def copy(self):
285 def copy(self):
286 other = patchmeta(self.path)
286 other = patchmeta(self.path)
287 other.oldpath = self.oldpath
287 other.oldpath = self.oldpath
288 other.mode = self.mode
288 other.mode = self.mode
289 other.op = self.op
289 other.op = self.op
290 other.binary = self.binary
290 other.binary = self.binary
291 return other
291 return other
292
292
293 def __repr__(self):
293 def __repr__(self):
294 return "<patchmeta %s %r>" % (self.op, self.path)
294 return "<patchmeta %s %r>" % (self.op, self.path)
295
295
296 def readgitpatch(lr):
296 def readgitpatch(lr):
297 """extract git-style metadata about patches from <patchname>"""
297 """extract git-style metadata about patches from <patchname>"""
298
298
299 # Filter patch for git information
299 # Filter patch for git information
300 gp = None
300 gp = None
301 gitpatches = []
301 gitpatches = []
302 for line in lr:
302 for line in lr:
303 line = line.rstrip(' \r\n')
303 line = line.rstrip(' \r\n')
304 if line.startswith('diff --git'):
304 if line.startswith('diff --git'):
305 m = gitre.match(line)
305 m = gitre.match(line)
306 if m:
306 if m:
307 if gp:
307 if gp:
308 gitpatches.append(gp)
308 gitpatches.append(gp)
309 dst = m.group(2)
309 dst = m.group(2)
310 gp = patchmeta(dst)
310 gp = patchmeta(dst)
311 elif gp:
311 elif gp:
312 if line.startswith('--- '):
312 if line.startswith('--- '):
313 gitpatches.append(gp)
313 gitpatches.append(gp)
314 gp = None
314 gp = None
315 continue
315 continue
316 if line.startswith('rename from '):
316 if line.startswith('rename from '):
317 gp.op = 'RENAME'
317 gp.op = 'RENAME'
318 gp.oldpath = line[12:]
318 gp.oldpath = line[12:]
319 elif line.startswith('rename to '):
319 elif line.startswith('rename to '):
320 gp.path = line[10:]
320 gp.path = line[10:]
321 elif line.startswith('copy from '):
321 elif line.startswith('copy from '):
322 gp.op = 'COPY'
322 gp.op = 'COPY'
323 gp.oldpath = line[10:]
323 gp.oldpath = line[10:]
324 elif line.startswith('copy to '):
324 elif line.startswith('copy to '):
325 gp.path = line[8:]
325 gp.path = line[8:]
326 elif line.startswith('deleted file'):
326 elif line.startswith('deleted file'):
327 gp.op = 'DELETE'
327 gp.op = 'DELETE'
328 elif line.startswith('new file mode '):
328 elif line.startswith('new file mode '):
329 gp.op = 'ADD'
329 gp.op = 'ADD'
330 gp.setmode(int(line[-6:], 8))
330 gp.setmode(int(line[-6:], 8))
331 elif line.startswith('new mode '):
331 elif line.startswith('new mode '):
332 gp.setmode(int(line[-6:], 8))
332 gp.setmode(int(line[-6:], 8))
333 elif line.startswith('GIT binary patch'):
333 elif line.startswith('GIT binary patch'):
334 gp.binary = True
334 gp.binary = True
335 if gp:
335 if gp:
336 gitpatches.append(gp)
336 gitpatches.append(gp)
337
337
338 return gitpatches
338 return gitpatches
339
339
340 class linereader(object):
340 class linereader(object):
341 # simple class to allow pushing lines back into the input stream
341 # simple class to allow pushing lines back into the input stream
342 def __init__(self, fp):
342 def __init__(self, fp):
343 self.fp = fp
343 self.fp = fp
344 self.buf = []
344 self.buf = []
345
345
346 def push(self, line):
346 def push(self, line):
347 if line is not None:
347 if line is not None:
348 self.buf.append(line)
348 self.buf.append(line)
349
349
350 def readline(self):
350 def readline(self):
351 if self.buf:
351 if self.buf:
352 l = self.buf[0]
352 l = self.buf[0]
353 del self.buf[0]
353 del self.buf[0]
354 return l
354 return l
355 return self.fp.readline()
355 return self.fp.readline()
356
356
357 def __iter__(self):
357 def __iter__(self):
358 while True:
358 while True:
359 l = self.readline()
359 l = self.readline()
360 if not l:
360 if not l:
361 break
361 break
362 yield l
362 yield l
363
363
364 class abstractbackend(object):
364 class abstractbackend(object):
365 def __init__(self, ui):
365 def __init__(self, ui):
366 self.ui = ui
366 self.ui = ui
367
367
368 def getfile(self, fname):
368 def getfile(self, fname):
369 """Return target file data and flags as a (data, (islink,
369 """Return target file data and flags as a (data, (islink,
370 isexec)) tuple.
370 isexec)) tuple.
371 """
371 """
372 raise NotImplementedError
372 raise NotImplementedError
373
373
374 def setfile(self, fname, data, mode, copysource):
374 def setfile(self, fname, data, mode, copysource):
375 """Write data to target file fname and set its mode. mode is a
375 """Write data to target file fname and set its mode. mode is a
376 (islink, isexec) tuple. If data is None, the file content should
376 (islink, isexec) tuple. If data is None, the file content should
377 be left unchanged. If the file is modified after being copied,
377 be left unchanged. If the file is modified after being copied,
378 copysource is set to the original file name.
378 copysource is set to the original file name.
379 """
379 """
380 raise NotImplementedError
380 raise NotImplementedError
381
381
382 def unlink(self, fname):
382 def unlink(self, fname):
383 """Unlink target file."""
383 """Unlink target file."""
384 raise NotImplementedError
384 raise NotImplementedError
385
385
386 def writerej(self, fname, failed, total, lines):
386 def writerej(self, fname, failed, total, lines):
387 """Write rejected lines for fname. total is the number of hunks
387 """Write rejected lines for fname. total is the number of hunks
388 which failed to apply and total the total number of hunks for this
388 which failed to apply and total the total number of hunks for this
389 files.
389 files.
390 """
390 """
391 pass
391 pass
392
392
393 def exists(self, fname):
393 def exists(self, fname):
394 raise NotImplementedError
394 raise NotImplementedError
395
395
396 class fsbackend(abstractbackend):
396 class fsbackend(abstractbackend):
397 def __init__(self, ui, basedir):
397 def __init__(self, ui, basedir):
398 super(fsbackend, self).__init__(ui)
398 super(fsbackend, self).__init__(ui)
399 self.opener = scmutil.opener(basedir)
399 self.opener = scmutil.opener(basedir)
400
400
401 def _join(self, f):
401 def _join(self, f):
402 return os.path.join(self.opener.base, f)
402 return os.path.join(self.opener.base, f)
403
403
404 def getfile(self, fname):
404 def getfile(self, fname):
405 path = self._join(fname)
405 path = self._join(fname)
406 if os.path.islink(path):
406 if os.path.islink(path):
407 return (os.readlink(path), (True, False))
407 return (os.readlink(path), (True, False))
408 isexec = False
408 isexec = False
409 try:
409 try:
410 isexec = os.lstat(path).st_mode & 0100 != 0
410 isexec = os.lstat(path).st_mode & 0100 != 0
411 except OSError, e:
411 except OSError, e:
412 if e.errno != errno.ENOENT:
412 if e.errno != errno.ENOENT:
413 raise
413 raise
414 return (self.opener.read(fname), (False, isexec))
414 return (self.opener.read(fname), (False, isexec))
415
415
416 def setfile(self, fname, data, mode, copysource):
416 def setfile(self, fname, data, mode, copysource):
417 islink, isexec = mode
417 islink, isexec = mode
418 if data is None:
418 if data is None:
419 util.setflags(self._join(fname), islink, isexec)
419 util.setflags(self._join(fname), islink, isexec)
420 return
420 return
421 if islink:
421 if islink:
422 self.opener.symlink(data, fname)
422 self.opener.symlink(data, fname)
423 else:
423 else:
424 self.opener.write(fname, data)
424 self.opener.write(fname, data)
425 if isexec:
425 if isexec:
426 util.setflags(self._join(fname), False, True)
426 util.setflags(self._join(fname), False, True)
427
427
428 def unlink(self, fname):
428 def unlink(self, fname):
429 try:
429 try:
430 util.unlinkpath(self._join(fname))
430 util.unlinkpath(self._join(fname))
431 except OSError, inst:
431 except OSError, inst:
432 if inst.errno != errno.ENOENT:
432 if inst.errno != errno.ENOENT:
433 raise
433 raise
434
434
435 def writerej(self, fname, failed, total, lines):
435 def writerej(self, fname, failed, total, lines):
436 fname = fname + ".rej"
436 fname = fname + ".rej"
437 self.ui.warn(
437 self.ui.warn(
438 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
438 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
439 (failed, total, fname))
439 (failed, total, fname))
440 fp = self.opener(fname, 'w')
440 fp = self.opener(fname, 'w')
441 fp.writelines(lines)
441 fp.writelines(lines)
442 fp.close()
442 fp.close()
443
443
444 def exists(self, fname):
444 def exists(self, fname):
445 return os.path.lexists(self._join(fname))
445 return os.path.lexists(self._join(fname))
446
446
447 class workingbackend(fsbackend):
447 class workingbackend(fsbackend):
448 def __init__(self, ui, repo, similarity):
448 def __init__(self, ui, repo, similarity):
449 super(workingbackend, self).__init__(ui, repo.root)
449 super(workingbackend, self).__init__(ui, repo.root)
450 self.repo = repo
450 self.repo = repo
451 self.similarity = similarity
451 self.similarity = similarity
452 self.removed = set()
452 self.removed = set()
453 self.changed = set()
453 self.changed = set()
454 self.copied = []
454 self.copied = []
455
455
456 def _checkknown(self, fname):
456 def _checkknown(self, fname):
457 if self.repo.dirstate[fname] == '?' and self.exists(fname):
457 if self.repo.dirstate[fname] == '?' and self.exists(fname):
458 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
458 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
459
459
460 def setfile(self, fname, data, mode, copysource):
460 def setfile(self, fname, data, mode, copysource):
461 self._checkknown(fname)
461 self._checkknown(fname)
462 super(workingbackend, self).setfile(fname, data, mode, copysource)
462 super(workingbackend, self).setfile(fname, data, mode, copysource)
463 if copysource is not None:
463 if copysource is not None:
464 self.copied.append((copysource, fname))
464 self.copied.append((copysource, fname))
465 self.changed.add(fname)
465 self.changed.add(fname)
466
466
467 def unlink(self, fname):
467 def unlink(self, fname):
468 self._checkknown(fname)
468 self._checkknown(fname)
469 super(workingbackend, self).unlink(fname)
469 super(workingbackend, self).unlink(fname)
470 self.removed.add(fname)
470 self.removed.add(fname)
471 self.changed.add(fname)
471 self.changed.add(fname)
472
472
473 def close(self):
473 def close(self):
474 wctx = self.repo[None]
474 wctx = self.repo[None]
475 addremoved = set(self.changed)
475 addremoved = set(self.changed)
476 for src, dst in self.copied:
476 for src, dst in self.copied:
477 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
477 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
478 addremoved.discard(src)
478 addremoved.discard(src)
479 if (not self.similarity) and self.removed:
479 if (not self.similarity) and self.removed:
480 wctx.forget(sorted(self.removed))
480 wctx.forget(sorted(self.removed))
481 if addremoved:
481 if addremoved:
482 cwd = self.repo.getcwd()
482 cwd = self.repo.getcwd()
483 if cwd:
483 if cwd:
484 addremoved = [util.pathto(self.repo.root, cwd, f)
484 addremoved = [util.pathto(self.repo.root, cwd, f)
485 for f in addremoved]
485 for f in addremoved]
486 scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
486 scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
487 return sorted(self.changed)
487 return sorted(self.changed)
488
488
489 class filestore(object):
489 class filestore(object):
490 def __init__(self, maxsize=None):
490 def __init__(self, maxsize=None):
491 self.opener = None
491 self.opener = None
492 self.files = {}
492 self.files = {}
493 self.created = 0
493 self.created = 0
494 self.maxsize = maxsize
494 self.maxsize = maxsize
495 if self.maxsize is None:
495 if self.maxsize is None:
496 self.maxsize = 4*(2**20)
496 self.maxsize = 4*(2**20)
497 self.size = 0
497 self.size = 0
498 self.data = {}
498 self.data = {}
499
499
500 def setfile(self, fname, data, mode, copied=None):
500 def setfile(self, fname, data, mode, copied=None):
501 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
501 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
502 self.data[fname] = (data, mode, copied)
502 self.data[fname] = (data, mode, copied)
503 self.size += len(data)
503 self.size += len(data)
504 else:
504 else:
505 if self.opener is None:
505 if self.opener is None:
506 root = tempfile.mkdtemp(prefix='hg-patch-')
506 root = tempfile.mkdtemp(prefix='hg-patch-')
507 self.opener = scmutil.opener(root)
507 self.opener = scmutil.opener(root)
508 # Avoid filename issues with these simple names
508 # Avoid filename issues with these simple names
509 fn = str(self.created)
509 fn = str(self.created)
510 self.opener.write(fn, data)
510 self.opener.write(fn, data)
511 self.created += 1
511 self.created += 1
512 self.files[fname] = (fn, mode, copied)
512 self.files[fname] = (fn, mode, copied)
513
513
514 def getfile(self, fname):
514 def getfile(self, fname):
515 if fname in self.data:
515 if fname in self.data:
516 return self.data[fname]
516 return self.data[fname]
517 if not self.opener or fname not in self.files:
517 if not self.opener or fname not in self.files:
518 raise IOError()
518 raise IOError()
519 fn, mode, copied = self.files[fname]
519 fn, mode, copied = self.files[fname]
520 return self.opener.read(fn), mode, copied
520 return self.opener.read(fn), mode, copied
521
521
522 def close(self):
522 def close(self):
523 if self.opener:
523 if self.opener:
524 shutil.rmtree(self.opener.base)
524 shutil.rmtree(self.opener.base)
525
525
526 class repobackend(abstractbackend):
526 class repobackend(abstractbackend):
527 def __init__(self, ui, repo, ctx, store):
527 def __init__(self, ui, repo, ctx, store):
528 super(repobackend, self).__init__(ui)
528 super(repobackend, self).__init__(ui)
529 self.repo = repo
529 self.repo = repo
530 self.ctx = ctx
530 self.ctx = ctx
531 self.store = store
531 self.store = store
532 self.changed = set()
532 self.changed = set()
533 self.removed = set()
533 self.removed = set()
534 self.copied = {}
534 self.copied = {}
535
535
536 def _checkknown(self, fname):
536 def _checkknown(self, fname):
537 if fname not in self.ctx:
537 if fname not in self.ctx:
538 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
538 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
539
539
540 def getfile(self, fname):
540 def getfile(self, fname):
541 try:
541 try:
542 fctx = self.ctx[fname]
542 fctx = self.ctx[fname]
543 except error.LookupError:
543 except error.LookupError:
544 raise IOError()
544 raise IOError()
545 flags = fctx.flags()
545 flags = fctx.flags()
546 return fctx.data(), ('l' in flags, 'x' in flags)
546 return fctx.data(), ('l' in flags, 'x' in flags)
547
547
548 def setfile(self, fname, data, mode, copysource):
548 def setfile(self, fname, data, mode, copysource):
549 if copysource:
549 if copysource:
550 self._checkknown(copysource)
550 self._checkknown(copysource)
551 if data is None:
551 if data is None:
552 data = self.ctx[fname].data()
552 data = self.ctx[fname].data()
553 self.store.setfile(fname, data, mode, copysource)
553 self.store.setfile(fname, data, mode, copysource)
554 self.changed.add(fname)
554 self.changed.add(fname)
555 if copysource:
555 if copysource:
556 self.copied[fname] = copysource
556 self.copied[fname] = copysource
557
557
558 def unlink(self, fname):
558 def unlink(self, fname):
559 self._checkknown(fname)
559 self._checkknown(fname)
560 self.removed.add(fname)
560 self.removed.add(fname)
561
561
562 def exists(self, fname):
562 def exists(self, fname):
563 return fname in self.ctx
563 return fname in self.ctx
564
564
565 def close(self):
565 def close(self):
566 return self.changed | self.removed
566 return self.changed | self.removed
567
567
568 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
568 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
569 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
569 unidesc = re.compile('@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))? @@')
570 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
570 contextdesc = re.compile('(---|\*\*\*) (\d+)(,(\d+))? (---|\*\*\*)')
571 eolmodes = ['strict', 'crlf', 'lf', 'auto']
571 eolmodes = ['strict', 'crlf', 'lf', 'auto']
572
572
573 class patchfile(object):
573 class patchfile(object):
574 def __init__(self, ui, gp, backend, store, eolmode='strict'):
574 def __init__(self, ui, gp, backend, store, eolmode='strict'):
575 self.fname = gp.path
575 self.fname = gp.path
576 self.eolmode = eolmode
576 self.eolmode = eolmode
577 self.eol = None
577 self.eol = None
578 self.backend = backend
578 self.backend = backend
579 self.ui = ui
579 self.ui = ui
580 self.lines = []
580 self.lines = []
581 self.exists = False
581 self.exists = False
582 self.missing = True
582 self.missing = True
583 self.mode = gp.mode
583 self.mode = gp.mode
584 self.copysource = gp.oldpath
584 self.copysource = gp.oldpath
585 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
585 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
586 self.remove = gp.op == 'DELETE'
586 self.remove = gp.op == 'DELETE'
587 try:
587 try:
588 if self.copysource is None:
588 if self.copysource is None:
589 data, mode = backend.getfile(self.fname)
589 data, mode = backend.getfile(self.fname)
590 self.exists = True
590 self.exists = True
591 else:
591 else:
592 data, mode = store.getfile(self.copysource)[:2]
592 data, mode = store.getfile(self.copysource)[:2]
593 self.exists = backend.exists(self.fname)
593 self.exists = backend.exists(self.fname)
594 self.missing = False
594 self.missing = False
595 if data:
595 if data:
596 self.lines = mdiff.splitnewlines(data)
596 self.lines = mdiff.splitnewlines(data)
597 if self.mode is None:
597 if self.mode is None:
598 self.mode = mode
598 self.mode = mode
599 if self.lines:
599 if self.lines:
600 # Normalize line endings
600 # Normalize line endings
601 if self.lines[0].endswith('\r\n'):
601 if self.lines[0].endswith('\r\n'):
602 self.eol = '\r\n'
602 self.eol = '\r\n'
603 elif self.lines[0].endswith('\n'):
603 elif self.lines[0].endswith('\n'):
604 self.eol = '\n'
604 self.eol = '\n'
605 if eolmode != 'strict':
605 if eolmode != 'strict':
606 nlines = []
606 nlines = []
607 for l in self.lines:
607 for l in self.lines:
608 if l.endswith('\r\n'):
608 if l.endswith('\r\n'):
609 l = l[:-2] + '\n'
609 l = l[:-2] + '\n'
610 nlines.append(l)
610 nlines.append(l)
611 self.lines = nlines
611 self.lines = nlines
612 except IOError:
612 except IOError:
613 if self.create:
613 if self.create:
614 self.missing = False
614 self.missing = False
615 if self.mode is None:
615 if self.mode is None:
616 self.mode = (False, False)
616 self.mode = (False, False)
617 if self.missing:
617 if self.missing:
618 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
618 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
619
619
620 self.hash = {}
620 self.hash = {}
621 self.dirty = 0
621 self.dirty = 0
622 self.offset = 0
622 self.offset = 0
623 self.skew = 0
623 self.skew = 0
624 self.rej = []
624 self.rej = []
625 self.fileprinted = False
625 self.fileprinted = False
626 self.printfile(False)
626 self.printfile(False)
627 self.hunks = 0
627 self.hunks = 0
628
628
629 def writelines(self, fname, lines, mode):
629 def writelines(self, fname, lines, mode):
630 if self.eolmode == 'auto':
630 if self.eolmode == 'auto':
631 eol = self.eol
631 eol = self.eol
632 elif self.eolmode == 'crlf':
632 elif self.eolmode == 'crlf':
633 eol = '\r\n'
633 eol = '\r\n'
634 else:
634 else:
635 eol = '\n'
635 eol = '\n'
636
636
637 if self.eolmode != 'strict' and eol and eol != '\n':
637 if self.eolmode != 'strict' and eol and eol != '\n':
638 rawlines = []
638 rawlines = []
639 for l in lines:
639 for l in lines:
640 if l and l[-1] == '\n':
640 if l and l[-1] == '\n':
641 l = l[:-1] + eol
641 l = l[:-1] + eol
642 rawlines.append(l)
642 rawlines.append(l)
643 lines = rawlines
643 lines = rawlines
644
644
645 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
645 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
646
646
647 def printfile(self, warn):
647 def printfile(self, warn):
648 if self.fileprinted:
648 if self.fileprinted:
649 return
649 return
650 if warn or self.ui.verbose:
650 if warn or self.ui.verbose:
651 self.fileprinted = True
651 self.fileprinted = True
652 s = _("patching file %s\n") % self.fname
652 s = _("patching file %s\n") % self.fname
653 if warn:
653 if warn:
654 self.ui.warn(s)
654 self.ui.warn(s)
655 else:
655 else:
656 self.ui.note(s)
656 self.ui.note(s)
657
657
658
658
659 def findlines(self, l, linenum):
659 def findlines(self, l, linenum):
660 # looks through the hash and finds candidate lines. The
660 # looks through the hash and finds candidate lines. The
661 # result is a list of line numbers sorted based on distance
661 # result is a list of line numbers sorted based on distance
662 # from linenum
662 # from linenum
663
663
664 cand = self.hash.get(l, [])
664 cand = self.hash.get(l, [])
665 if len(cand) > 1:
665 if len(cand) > 1:
666 # resort our list of potentials forward then back.
666 # resort our list of potentials forward then back.
667 cand.sort(key=lambda x: abs(x - linenum))
667 cand.sort(key=lambda x: abs(x - linenum))
668 return cand
668 return cand
669
669
670 def write_rej(self):
670 def write_rej(self):
671 # our rejects are a little different from patch(1). This always
671 # our rejects are a little different from patch(1). This always
672 # creates rejects in the same form as the original patch. A file
672 # creates rejects in the same form as the original patch. A file
673 # header is inserted so that you can run the reject through patch again
673 # header is inserted so that you can run the reject through patch again
674 # without having to type the filename.
674 # without having to type the filename.
675 if not self.rej:
675 if not self.rej:
676 return
676 return
677 base = os.path.basename(self.fname)
677 base = os.path.basename(self.fname)
678 lines = ["--- %s\n+++ %s\n" % (base, base)]
678 lines = ["--- %s\n+++ %s\n" % (base, base)]
679 for x in self.rej:
679 for x in self.rej:
680 for l in x.hunk:
680 for l in x.hunk:
681 lines.append(l)
681 lines.append(l)
682 if l[-1] != '\n':
682 if l[-1] != '\n':
683 lines.append("\n\ No newline at end of file\n")
683 lines.append("\n\ No newline at end of file\n")
684 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
684 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
685
685
686 def apply(self, h):
686 def apply(self, h):
687 if not h.complete():
687 if not h.complete():
688 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
688 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
689 (h.number, h.desc, len(h.a), h.lena, len(h.b),
689 (h.number, h.desc, len(h.a), h.lena, len(h.b),
690 h.lenb))
690 h.lenb))
691
691
692 self.hunks += 1
692 self.hunks += 1
693
693
694 if self.missing:
694 if self.missing:
695 self.rej.append(h)
695 self.rej.append(h)
696 return -1
696 return -1
697
697
698 if self.exists and self.create:
698 if self.exists and self.create:
699 if self.copysource:
699 if self.copysource:
700 self.ui.warn(_("cannot create %s: destination already "
700 self.ui.warn(_("cannot create %s: destination already "
701 "exists\n" % self.fname))
701 "exists\n" % self.fname))
702 else:
702 else:
703 self.ui.warn(_("file %s already exists\n") % self.fname)
703 self.ui.warn(_("file %s already exists\n") % self.fname)
704 self.rej.append(h)
704 self.rej.append(h)
705 return -1
705 return -1
706
706
707 if isinstance(h, binhunk):
707 if isinstance(h, binhunk):
708 if self.remove:
708 if self.remove:
709 self.backend.unlink(self.fname)
709 self.backend.unlink(self.fname)
710 else:
710 else:
711 self.lines[:] = h.new()
711 self.lines[:] = h.new()
712 self.offset += len(h.new())
712 self.offset += len(h.new())
713 self.dirty = True
713 self.dirty = True
714 return 0
714 return 0
715
715
716 horig = h
716 horig = h
717 if (self.eolmode in ('crlf', 'lf')
717 if (self.eolmode in ('crlf', 'lf')
718 or self.eolmode == 'auto' and self.eol):
718 or self.eolmode == 'auto' and self.eol):
719 # If new eols are going to be normalized, then normalize
719 # If new eols are going to be normalized, then normalize
720 # hunk data before patching. Otherwise, preserve input
720 # hunk data before patching. Otherwise, preserve input
721 # line-endings.
721 # line-endings.
722 h = h.getnormalized()
722 h = h.getnormalized()
723
723
724 # fast case first, no offsets, no fuzz
724 # fast case first, no offsets, no fuzz
725 old = h.old()
725 old = h.old()
726 # patch starts counting at 1 unless we are adding the file
726 # patch starts counting at 1 unless we are adding the file
727 if h.starta == 0:
727 if h.starta == 0:
728 start = 0
728 start = 0
729 else:
729 else:
730 start = h.starta + self.offset - 1
730 start = h.starta + self.offset - 1
731 orig_start = start
731 orig_start = start
732 # if there's skew we want to emit the "(offset %d lines)" even
732 # if there's skew we want to emit the "(offset %d lines)" even
733 # when the hunk cleanly applies at start + skew, so skip the
733 # when the hunk cleanly applies at start + skew, so skip the
734 # fast case code
734 # fast case code
735 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
735 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
736 if self.remove:
736 if self.remove:
737 self.backend.unlink(self.fname)
737 self.backend.unlink(self.fname)
738 else:
738 else:
739 self.lines[start : start + h.lena] = h.new()
739 self.lines[start : start + h.lena] = h.new()
740 self.offset += h.lenb - h.lena
740 self.offset += h.lenb - h.lena
741 self.dirty = True
741 self.dirty = True
742 return 0
742 return 0
743
743
744 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
744 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
745 self.hash = {}
745 self.hash = {}
746 for x, s in enumerate(self.lines):
746 for x, s in enumerate(self.lines):
747 self.hash.setdefault(s, []).append(x)
747 self.hash.setdefault(s, []).append(x)
748 if h.hunk[-1][0] != ' ':
748 if h.hunk[-1][0] != ' ':
749 # if the hunk tried to put something at the bottom of the file
749 # if the hunk tried to put something at the bottom of the file
750 # override the start line and use eof here
750 # override the start line and use eof here
751 search_start = len(self.lines)
751 search_start = len(self.lines)
752 else:
752 else:
753 search_start = orig_start + self.skew
753 search_start = orig_start + self.skew
754
754
755 for fuzzlen in xrange(3):
755 for fuzzlen in xrange(3):
756 for toponly in [True, False]:
756 for toponly in [True, False]:
757 old = h.old(fuzzlen, toponly)
757 old = h.old(fuzzlen, toponly)
758
758
759 cand = self.findlines(old[0][1:], search_start)
759 cand = self.findlines(old[0][1:], search_start)
760 for l in cand:
760 for l in cand:
761 if diffhelpers.testhunk(old, self.lines, l) == 0:
761 if diffhelpers.testhunk(old, self.lines, l) == 0:
762 newlines = h.new(fuzzlen, toponly)
762 newlines = h.new(fuzzlen, toponly)
763 self.lines[l : l + len(old)] = newlines
763 self.lines[l : l + len(old)] = newlines
764 self.offset += len(newlines) - len(old)
764 self.offset += len(newlines) - len(old)
765 self.skew = l - orig_start
765 self.skew = l - orig_start
766 self.dirty = True
766 self.dirty = True
767 offset = l - orig_start - fuzzlen
767 offset = l - orig_start - fuzzlen
768 if fuzzlen:
768 if fuzzlen:
769 msg = _("Hunk #%d succeeded at %d "
769 msg = _("Hunk #%d succeeded at %d "
770 "with fuzz %d "
770 "with fuzz %d "
771 "(offset %d lines).\n")
771 "(offset %d lines).\n")
772 self.printfile(True)
772 self.printfile(True)
773 self.ui.warn(msg %
773 self.ui.warn(msg %
774 (h.number, l + 1, fuzzlen, offset))
774 (h.number, l + 1, fuzzlen, offset))
775 else:
775 else:
776 msg = _("Hunk #%d succeeded at %d "
776 msg = _("Hunk #%d succeeded at %d "
777 "(offset %d lines).\n")
777 "(offset %d lines).\n")
778 self.ui.note(msg % (h.number, l + 1, offset))
778 self.ui.note(msg % (h.number, l + 1, offset))
779 return fuzzlen
779 return fuzzlen
780 self.printfile(True)
780 self.printfile(True)
781 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
781 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
782 self.rej.append(horig)
782 self.rej.append(horig)
783 return -1
783 return -1
784
784
785 def close(self):
785 def close(self):
786 if self.dirty:
786 if self.dirty:
787 self.writelines(self.fname, self.lines, self.mode)
787 self.writelines(self.fname, self.lines, self.mode)
788 self.write_rej()
788 self.write_rej()
789 return len(self.rej)
789 return len(self.rej)
790
790
791 class hunk(object):
791 class hunk(object):
792 def __init__(self, desc, num, lr, context):
792 def __init__(self, desc, num, lr, context):
793 self.number = num
793 self.number = num
794 self.desc = desc
794 self.desc = desc
795 self.hunk = [desc]
795 self.hunk = [desc]
796 self.a = []
796 self.a = []
797 self.b = []
797 self.b = []
798 self.starta = self.lena = None
798 self.starta = self.lena = None
799 self.startb = self.lenb = None
799 self.startb = self.lenb = None
800 if lr is not None:
800 if lr is not None:
801 if context:
801 if context:
802 self.read_context_hunk(lr)
802 self.read_context_hunk(lr)
803 else:
803 else:
804 self.read_unified_hunk(lr)
804 self.read_unified_hunk(lr)
805
805
806 def getnormalized(self):
806 def getnormalized(self):
807 """Return a copy with line endings normalized to LF."""
807 """Return a copy with line endings normalized to LF."""
808
808
809 def normalize(lines):
809 def normalize(lines):
810 nlines = []
810 nlines = []
811 for line in lines:
811 for line in lines:
812 if line.endswith('\r\n'):
812 if line.endswith('\r\n'):
813 line = line[:-2] + '\n'
813 line = line[:-2] + '\n'
814 nlines.append(line)
814 nlines.append(line)
815 return nlines
815 return nlines
816
816
817 # Dummy object, it is rebuilt manually
817 # Dummy object, it is rebuilt manually
818 nh = hunk(self.desc, self.number, None, None)
818 nh = hunk(self.desc, self.number, None, None)
819 nh.number = self.number
819 nh.number = self.number
820 nh.desc = self.desc
820 nh.desc = self.desc
821 nh.hunk = self.hunk
821 nh.hunk = self.hunk
822 nh.a = normalize(self.a)
822 nh.a = normalize(self.a)
823 nh.b = normalize(self.b)
823 nh.b = normalize(self.b)
824 nh.starta = self.starta
824 nh.starta = self.starta
825 nh.startb = self.startb
825 nh.startb = self.startb
826 nh.lena = self.lena
826 nh.lena = self.lena
827 nh.lenb = self.lenb
827 nh.lenb = self.lenb
828 return nh
828 return nh
829
829
830 def read_unified_hunk(self, lr):
830 def read_unified_hunk(self, lr):
831 m = unidesc.match(self.desc)
831 m = unidesc.match(self.desc)
832 if not m:
832 if not m:
833 raise PatchError(_("bad hunk #%d") % self.number)
833 raise PatchError(_("bad hunk #%d") % self.number)
834 self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
834 self.starta, foo, self.lena, self.startb, foo2, self.lenb = m.groups()
835 if self.lena is None:
835 if self.lena is None:
836 self.lena = 1
836 self.lena = 1
837 else:
837 else:
838 self.lena = int(self.lena)
838 self.lena = int(self.lena)
839 if self.lenb is None:
839 if self.lenb is None:
840 self.lenb = 1
840 self.lenb = 1
841 else:
841 else:
842 self.lenb = int(self.lenb)
842 self.lenb = int(self.lenb)
843 self.starta = int(self.starta)
843 self.starta = int(self.starta)
844 self.startb = int(self.startb)
844 self.startb = int(self.startb)
845 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
845 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
846 # if we hit eof before finishing out the hunk, the last line will
846 # if we hit eof before finishing out the hunk, the last line will
847 # be zero length. Lets try to fix it up.
847 # be zero length. Lets try to fix it up.
848 while len(self.hunk[-1]) == 0:
848 while len(self.hunk[-1]) == 0:
849 del self.hunk[-1]
849 del self.hunk[-1]
850 del self.a[-1]
850 del self.a[-1]
851 del self.b[-1]
851 del self.b[-1]
852 self.lena -= 1
852 self.lena -= 1
853 self.lenb -= 1
853 self.lenb -= 1
854 self._fixnewline(lr)
854 self._fixnewline(lr)
855
855
856 def read_context_hunk(self, lr):
856 def read_context_hunk(self, lr):
857 self.desc = lr.readline()
857 self.desc = lr.readline()
858 m = contextdesc.match(self.desc)
858 m = contextdesc.match(self.desc)
859 if not m:
859 if not m:
860 raise PatchError(_("bad hunk #%d") % self.number)
860 raise PatchError(_("bad hunk #%d") % self.number)
861 foo, self.starta, foo2, aend, foo3 = m.groups()
861 foo, self.starta, foo2, aend, foo3 = m.groups()
862 self.starta = int(self.starta)
862 self.starta = int(self.starta)
863 if aend is None:
863 if aend is None:
864 aend = self.starta
864 aend = self.starta
865 self.lena = int(aend) - self.starta
865 self.lena = int(aend) - self.starta
866 if self.starta:
866 if self.starta:
867 self.lena += 1
867 self.lena += 1
868 for x in xrange(self.lena):
868 for x in xrange(self.lena):
869 l = lr.readline()
869 l = lr.readline()
870 if l.startswith('---'):
870 if l.startswith('---'):
871 # lines addition, old block is empty
871 # lines addition, old block is empty
872 lr.push(l)
872 lr.push(l)
873 break
873 break
874 s = l[2:]
874 s = l[2:]
875 if l.startswith('- ') or l.startswith('! '):
875 if l.startswith('- ') or l.startswith('! '):
876 u = '-' + s
876 u = '-' + s
877 elif l.startswith(' '):
877 elif l.startswith(' '):
878 u = ' ' + s
878 u = ' ' + s
879 else:
879 else:
880 raise PatchError(_("bad hunk #%d old text line %d") %
880 raise PatchError(_("bad hunk #%d old text line %d") %
881 (self.number, x))
881 (self.number, x))
882 self.a.append(u)
882 self.a.append(u)
883 self.hunk.append(u)
883 self.hunk.append(u)
884
884
885 l = lr.readline()
885 l = lr.readline()
886 if l.startswith('\ '):
886 if l.startswith('\ '):
887 s = self.a[-1][:-1]
887 s = self.a[-1][:-1]
888 self.a[-1] = s
888 self.a[-1] = s
889 self.hunk[-1] = s
889 self.hunk[-1] = s
890 l = lr.readline()
890 l = lr.readline()
891 m = contextdesc.match(l)
891 m = contextdesc.match(l)
892 if not m:
892 if not m:
893 raise PatchError(_("bad hunk #%d") % self.number)
893 raise PatchError(_("bad hunk #%d") % self.number)
894 foo, self.startb, foo2, bend, foo3 = m.groups()
894 foo, self.startb, foo2, bend, foo3 = m.groups()
895 self.startb = int(self.startb)
895 self.startb = int(self.startb)
896 if bend is None:
896 if bend is None:
897 bend = self.startb
897 bend = self.startb
898 self.lenb = int(bend) - self.startb
898 self.lenb = int(bend) - self.startb
899 if self.startb:
899 if self.startb:
900 self.lenb += 1
900 self.lenb += 1
901 hunki = 1
901 hunki = 1
902 for x in xrange(self.lenb):
902 for x in xrange(self.lenb):
903 l = lr.readline()
903 l = lr.readline()
904 if l.startswith('\ '):
904 if l.startswith('\ '):
905 # XXX: the only way to hit this is with an invalid line range.
905 # XXX: the only way to hit this is with an invalid line range.
906 # The no-eol marker is not counted in the line range, but I
906 # The no-eol marker is not counted in the line range, but I
907 # guess there are diff(1) out there which behave differently.
907 # guess there are diff(1) out there which behave differently.
908 s = self.b[-1][:-1]
908 s = self.b[-1][:-1]
909 self.b[-1] = s
909 self.b[-1] = s
910 self.hunk[hunki - 1] = s
910 self.hunk[hunki - 1] = s
911 continue
911 continue
912 if not l:
912 if not l:
913 # line deletions, new block is empty and we hit EOF
913 # line deletions, new block is empty and we hit EOF
914 lr.push(l)
914 lr.push(l)
915 break
915 break
916 s = l[2:]
916 s = l[2:]
917 if l.startswith('+ ') or l.startswith('! '):
917 if l.startswith('+ ') or l.startswith('! '):
918 u = '+' + s
918 u = '+' + s
919 elif l.startswith(' '):
919 elif l.startswith(' '):
920 u = ' ' + s
920 u = ' ' + s
921 elif len(self.b) == 0:
921 elif len(self.b) == 0:
922 # line deletions, new block is empty
922 # line deletions, new block is empty
923 lr.push(l)
923 lr.push(l)
924 break
924 break
925 else:
925 else:
926 raise PatchError(_("bad hunk #%d old text line %d") %
926 raise PatchError(_("bad hunk #%d old text line %d") %
927 (self.number, x))
927 (self.number, x))
928 self.b.append(s)
928 self.b.append(s)
929 while True:
929 while True:
930 if hunki >= len(self.hunk):
930 if hunki >= len(self.hunk):
931 h = ""
931 h = ""
932 else:
932 else:
933 h = self.hunk[hunki]
933 h = self.hunk[hunki]
934 hunki += 1
934 hunki += 1
935 if h == u:
935 if h == u:
936 break
936 break
937 elif h.startswith('-'):
937 elif h.startswith('-'):
938 continue
938 continue
939 else:
939 else:
940 self.hunk.insert(hunki - 1, u)
940 self.hunk.insert(hunki - 1, u)
941 break
941 break
942
942
943 if not self.a:
943 if not self.a:
944 # this happens when lines were only added to the hunk
944 # this happens when lines were only added to the hunk
945 for x in self.hunk:
945 for x in self.hunk:
946 if x.startswith('-') or x.startswith(' '):
946 if x.startswith('-') or x.startswith(' '):
947 self.a.append(x)
947 self.a.append(x)
948 if not self.b:
948 if not self.b:
949 # this happens when lines were only deleted from the hunk
949 # this happens when lines were only deleted from the hunk
950 for x in self.hunk:
950 for x in self.hunk:
951 if x.startswith('+') or x.startswith(' '):
951 if x.startswith('+') or x.startswith(' '):
952 self.b.append(x[1:])
952 self.b.append(x[1:])
953 # @@ -start,len +start,len @@
953 # @@ -start,len +start,len @@
954 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
954 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
955 self.startb, self.lenb)
955 self.startb, self.lenb)
956 self.hunk[0] = self.desc
956 self.hunk[0] = self.desc
957 self._fixnewline(lr)
957 self._fixnewline(lr)
958
958
959 def _fixnewline(self, lr):
959 def _fixnewline(self, lr):
960 l = lr.readline()
960 l = lr.readline()
961 if l.startswith('\ '):
961 if l.startswith('\ '):
962 diffhelpers.fix_newline(self.hunk, self.a, self.b)
962 diffhelpers.fix_newline(self.hunk, self.a, self.b)
963 else:
963 else:
964 lr.push(l)
964 lr.push(l)
965
965
966 def complete(self):
966 def complete(self):
967 return len(self.a) == self.lena and len(self.b) == self.lenb
967 return len(self.a) == self.lena and len(self.b) == self.lenb
968
968
969 def fuzzit(self, l, fuzz, toponly):
969 def fuzzit(self, l, fuzz, toponly):
970 # this removes context lines from the top and bottom of list 'l'. It
970 # this removes context lines from the top and bottom of list 'l'. It
971 # checks the hunk to make sure only context lines are removed, and then
971 # checks the hunk to make sure only context lines are removed, and then
972 # returns a new shortened list of lines.
972 # returns a new shortened list of lines.
973 fuzz = min(fuzz, len(l)-1)
973 fuzz = min(fuzz, len(l)-1)
974 if fuzz:
974 if fuzz:
975 top = 0
975 top = 0
976 bot = 0
976 bot = 0
977 hlen = len(self.hunk)
977 hlen = len(self.hunk)
978 for x in xrange(hlen - 1):
978 for x in xrange(hlen - 1):
979 # the hunk starts with the @@ line, so use x+1
979 # the hunk starts with the @@ line, so use x+1
980 if self.hunk[x + 1][0] == ' ':
980 if self.hunk[x + 1][0] == ' ':
981 top += 1
981 top += 1
982 else:
982 else:
983 break
983 break
984 if not toponly:
984 if not toponly:
985 for x in xrange(hlen - 1):
985 for x in xrange(hlen - 1):
986 if self.hunk[hlen - bot - 1][0] == ' ':
986 if self.hunk[hlen - bot - 1][0] == ' ':
987 bot += 1
987 bot += 1
988 else:
988 else:
989 break
989 break
990
990
991 # top and bot now count context in the hunk
991 # top and bot now count context in the hunk
992 # adjust them if either one is short
992 # adjust them if either one is short
993 context = max(top, bot, 3)
993 context = max(top, bot, 3)
994 if bot < context:
994 if bot < context:
995 bot = max(0, fuzz - (context - bot))
995 bot = max(0, fuzz - (context - bot))
996 else:
996 else:
997 bot = min(fuzz, bot)
997 bot = min(fuzz, bot)
998 if top < context:
998 if top < context:
999 top = max(0, fuzz - (context - top))
999 top = max(0, fuzz - (context - top))
1000 else:
1000 else:
1001 top = min(fuzz, top)
1001 top = min(fuzz, top)
1002
1002
1003 return l[top:len(l)-bot]
1003 return l[top:len(l)-bot]
1004 return l
1004 return l
1005
1005
1006 def old(self, fuzz=0, toponly=False):
1006 def old(self, fuzz=0, toponly=False):
1007 return self.fuzzit(self.a, fuzz, toponly)
1007 return self.fuzzit(self.a, fuzz, toponly)
1008
1008
1009 def new(self, fuzz=0, toponly=False):
1009 def new(self, fuzz=0, toponly=False):
1010 return self.fuzzit(self.b, fuzz, toponly)
1010 return self.fuzzit(self.b, fuzz, toponly)
1011
1011
1012 class binhunk(object):
1012 class binhunk(object):
1013 'A binary patch file. Only understands literals so far.'
1013 'A binary patch file. Only understands literals so far.'
1014 def __init__(self, lr):
1014 def __init__(self, lr):
1015 self.text = None
1015 self.text = None
1016 self.hunk = ['GIT binary patch\n']
1016 self.hunk = ['GIT binary patch\n']
1017 self._read(lr)
1017 self._read(lr)
1018
1018
1019 def complete(self):
1019 def complete(self):
1020 return self.text is not None
1020 return self.text is not None
1021
1021
1022 def new(self):
1022 def new(self):
1023 return [self.text]
1023 return [self.text]
1024
1024
1025 def _read(self, lr):
1025 def _read(self, lr):
1026 line = lr.readline()
1026 line = lr.readline()
1027 self.hunk.append(line)
1027 self.hunk.append(line)
1028 while line and not line.startswith('literal '):
1028 while line and not line.startswith('literal '):
1029 line = lr.readline()
1029 line = lr.readline()
1030 self.hunk.append(line)
1030 self.hunk.append(line)
1031 if not line:
1031 if not line:
1032 raise PatchError(_('could not extract binary patch'))
1032 raise PatchError(_('could not extract binary patch'))
1033 size = int(line[8:].rstrip())
1033 size = int(line[8:].rstrip())
1034 dec = []
1034 dec = []
1035 line = lr.readline()
1035 line = lr.readline()
1036 self.hunk.append(line)
1036 self.hunk.append(line)
1037 while len(line) > 1:
1037 while len(line) > 1:
1038 l = line[0]
1038 l = line[0]
1039 if l <= 'Z' and l >= 'A':
1039 if l <= 'Z' and l >= 'A':
1040 l = ord(l) - ord('A') + 1
1040 l = ord(l) - ord('A') + 1
1041 else:
1041 else:
1042 l = ord(l) - ord('a') + 27
1042 l = ord(l) - ord('a') + 27
1043 dec.append(base85.b85decode(line[1:-1])[:l])
1043 dec.append(base85.b85decode(line[1:-1])[:l])
1044 line = lr.readline()
1044 line = lr.readline()
1045 self.hunk.append(line)
1045 self.hunk.append(line)
1046 text = zlib.decompress(''.join(dec))
1046 text = zlib.decompress(''.join(dec))
1047 if len(text) != size:
1047 if len(text) != size:
1048 raise PatchError(_('binary patch is %d bytes, not %d') %
1048 raise PatchError(_('binary patch is %d bytes, not %d') %
1049 (len(text), size))
1049 (len(text), size))
1050 self.text = text
1050 self.text = text
1051
1051
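# Editor's note: an illustrative sketch (not part of patch.py) of the
# per-line length prefix decoded in binhunk._read() above. In a git
# binary patch, 'A'..'Z' encode lengths 1..26 and 'a'..'z' encode 27..52.
def _linelen(c):
    if 'A' <= c <= 'Z':
        return ord(c) - ord('A') + 1
    return ord(c) - ord('a') + 27

# _linelen('A') == 1, _linelen('Z') == 26, _linelen('a') == 27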
1052 def parsefilename(str):
1052 def parsefilename(str):
1053 # --- filename \t|space stuff
1053 # --- filename \t|space stuff
1054 s = str[4:].rstrip('\r\n')
1054 s = str[4:].rstrip('\r\n')
1055 i = s.find('\t')
1055 i = s.find('\t')
1056 if i < 0:
1056 if i < 0:
1057 i = s.find(' ')
1057 i = s.find(' ')
1058 if i < 0:
1058 if i < 0:
1059 return s
1059 return s
1060 return s[:i]
1060 return s[:i]
1061
1061
1062 def pathstrip(path, strip):
1062 def pathstrip(path, strip):
1063 pathlen = len(path)
1063 pathlen = len(path)
1064 i = 0
1064 i = 0
1065 if strip == 0:
1065 if strip == 0:
1066 return '', path.rstrip()
1066 return '', path.rstrip()
1067 count = strip
1067 count = strip
1068 while count > 0:
1068 while count > 0:
1069 i = path.find('/', i)
1069 i = path.find('/', i)
1070 if i == -1:
1070 if i == -1:
1071 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1071 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1072 (count, strip, path))
1072 (count, strip, path))
1073 i += 1
1073 i += 1
1074 # consume '//' in the path
1074 # consume '//' in the path
1075 while i < pathlen - 1 and path[i] == '/':
1075 while i < pathlen - 1 and path[i] == '/':
1076 i += 1
1076 i += 1
1077 count -= 1
1077 count -= 1
1078 return path[:i].lstrip(), path[i:].rstrip()
1078 return path[:i].lstrip(), path[i:].rstrip()
1079
1079
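# Editor's note: expected pathstrip() behaviour, inferred from the code
# above (a sketch, not additional tests):
#   pathstrip('a/b/file.c', 0)  -> ('', 'a/b/file.c')
#   pathstrip('a/b/file.c', 1)  -> ('a/', 'b/file.c')
#   pathstrip('a//b/file.c', 1) -> ('a//', 'b/file.c')   # '//' is consumed
# Stripping more components than the path contains raises PatchError.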
1080 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
1080 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
1081 nulla = afile_orig == "/dev/null"
1081 nulla = afile_orig == "/dev/null"
1082 nullb = bfile_orig == "/dev/null"
1082 nullb = bfile_orig == "/dev/null"
1083 create = nulla and hunk.starta == 0 and hunk.lena == 0
1083 create = nulla and hunk.starta == 0 and hunk.lena == 0
1084 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1084 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1085 abase, afile = pathstrip(afile_orig, strip)
1085 abase, afile = pathstrip(afile_orig, strip)
1086 gooda = not nulla and backend.exists(afile)
1086 gooda = not nulla and backend.exists(afile)
1087 bbase, bfile = pathstrip(bfile_orig, strip)
1087 bbase, bfile = pathstrip(bfile_orig, strip)
1088 if afile == bfile:
1088 if afile == bfile:
1089 goodb = gooda
1089 goodb = gooda
1090 else:
1090 else:
1091 goodb = not nullb and backend.exists(bfile)
1091 goodb = not nullb and backend.exists(bfile)
1092 missing = not goodb and not gooda and not create
1092 missing = not goodb and not gooda and not create
1093
1093
1094 # some diff programs apparently produce patches where the afile is
1094 # some diff programs apparently produce patches where the afile is
1095 # not /dev/null, but afile starts with bfile
1095 # not /dev/null, but afile starts with bfile
1096 abasedir = afile[:afile.rfind('/') + 1]
1096 abasedir = afile[:afile.rfind('/') + 1]
1097 bbasedir = bfile[:bfile.rfind('/') + 1]
1097 bbasedir = bfile[:bfile.rfind('/') + 1]
1098 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1098 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1099 and hunk.starta == 0 and hunk.lena == 0):
1099 and hunk.starta == 0 and hunk.lena == 0):
1100 create = True
1100 create = True
1101 missing = False
1101 missing = False
1102
1102
1103 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1103 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1104 # diff is between a file and its backup. In this case, the original
1104 # diff is between a file and its backup. In this case, the original
1105 # file should be patched (see original mpatch code).
1105 # file should be patched (see original mpatch code).
1106 isbackup = (abase == bbase and bfile.startswith(afile))
1106 isbackup = (abase == bbase and bfile.startswith(afile))
1107 fname = None
1107 fname = None
1108 if not missing:
1108 if not missing:
1109 if gooda and goodb:
1109 if gooda and goodb:
1110 fname = isbackup and afile or bfile
1110 fname = isbackup and afile or bfile
1111 elif gooda:
1111 elif gooda:
1112 fname = afile
1112 fname = afile
1113
1113
1114 if not fname:
1114 if not fname:
1115 if not nullb:
1115 if not nullb:
1116 fname = isbackup and afile or bfile
1116 fname = isbackup and afile or bfile
1117 elif not nulla:
1117 elif not nulla:
1118 fname = afile
1118 fname = afile
1119 else:
1119 else:
1120 raise PatchError(_("undefined source and destination files"))
1120 raise PatchError(_("undefined source and destination files"))
1121
1121
1122 gp = patchmeta(fname)
1122 gp = patchmeta(fname)
1123 if create:
1123 if create:
1124 gp.op = 'ADD'
1124 gp.op = 'ADD'
1125 elif remove:
1125 elif remove:
1126 gp.op = 'DELETE'
1126 gp.op = 'DELETE'
1127 return gp
1127 return gp
1128
1128
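# Editor's note: a sketch of how makepatchmeta() classifies a hunk. For
# a plain unified diff such as
#   --- /dev/null
#   +++ b/newfile
#   @@ -0,0 +1,3 @@
# afile is "/dev/null" and the hunk has starta == 0 and lena == 0, so the
# returned patchmeta gets op = 'ADD'; the symmetric case against
# "+++ /dev/null" with startb == 0 and lenb == 0 yields op = 'DELETE'.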
1129 def scangitpatch(lr, firstline):
1129 def scangitpatch(lr, firstline):
1130 """
1130 """
1131 Git patches can emit:
1131 Git patches can emit:
1132 - rename a to b
1132 - rename a to b
1133 - change b
1133 - change b
1134 - copy a to c
1134 - copy a to c
1135 - change c
1135 - change c
1136
1136
1137 We cannot apply this sequence as-is: the renamed 'a' could not be
1137 We cannot apply this sequence as-is: the renamed 'a' could not be
1138 found, because it would have been renamed already. And we cannot copy
1138 found, because it would have been renamed already. And we cannot copy
1139 from 'b' instead because 'b' would have been changed already. So
1139 from 'b' instead because 'b' would have been changed already. So
1140 we scan the git patch for copy and rename commands so we can
1140 we scan the git patch for copy and rename commands so we can
1141 perform the copies ahead of time.
1141 perform the copies ahead of time.
1142 """
1142 """
1143 pos = 0
1143 pos = 0
1144 try:
1144 try:
1145 pos = lr.fp.tell()
1145 pos = lr.fp.tell()
1146 fp = lr.fp
1146 fp = lr.fp
1147 except IOError:
1147 except IOError:
1148 fp = cStringIO.StringIO(lr.fp.read())
1148 fp = cStringIO.StringIO(lr.fp.read())
1149 gitlr = linereader(fp)
1149 gitlr = linereader(fp)
1150 gitlr.push(firstline)
1150 gitlr.push(firstline)
1151 gitpatches = readgitpatch(gitlr)
1151 gitpatches = readgitpatch(gitlr)
1152 fp.seek(pos)
1152 fp.seek(pos)
1153 return gitpatches
1153 return gitpatches
1154
1154
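# Editor's note: a sketch of the problematic ordering described in the
# scangitpatch() docstring above, as it appears in a git-style patch:
#   diff --git a/a b/b
#   rename from a
#   rename to b
#   diff --git a/a b/c
#   copy from a
#   copy to c
# The copy of 'a' to 'c' has to happen before 'a' is renamed away, which
# is why the copy/rename metadata is collected in a first pass.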
1155 def iterhunks(fp):
1155 def iterhunks(fp):
1156 """Read a patch and yield the following events:
1156 """Read a patch and yield the following events:
1157 - ("file", afile, bfile, firsthunk): select a new target file.
1157 - ("file", afile, bfile, firsthunk): select a new target file.
1158 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1158 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1159 "file" event.
1159 "file" event.
1160 - ("git", gitchanges): current diff is in git format, gitchanges
1160 - ("git", gitchanges): current diff is in git format, gitchanges
1161 maps filenames to gitpatch records. Unique event.
1161 maps filenames to gitpatch records. Unique event.
1162 """
1162 """
1163 afile = ""
1163 afile = ""
1164 bfile = ""
1164 bfile = ""
1165 state = None
1165 state = None
1166 hunknum = 0
1166 hunknum = 0
1167 emitfile = newfile = False
1167 emitfile = newfile = False
1168 gitpatches = None
1168 gitpatches = None
1169
1169
1170 # our states
1170 # our states
1171 BFILE = 1
1171 BFILE = 1
1172 context = None
1172 context = None
1173 lr = linereader(fp)
1173 lr = linereader(fp)
1174
1174
1175 while True:
1175 while True:
1176 x = lr.readline()
1176 x = lr.readline()
1177 if not x:
1177 if not x:
1178 break
1178 break
1179 if state == BFILE and (
1179 if state == BFILE and (
1180 (not context and x[0] == '@')
1180 (not context and x[0] == '@')
1181 or (context is not False and x.startswith('***************'))
1181 or (context is not False and x.startswith('***************'))
1182 or x.startswith('GIT binary patch')):
1182 or x.startswith('GIT binary patch')):
1183 gp = None
1183 gp = None
1184 if (gitpatches and
1184 if (gitpatches and
1185 (gitpatches[-1][0] == afile or gitpatches[-1][1] == bfile)):
1185 (gitpatches[-1][0] == afile or gitpatches[-1][1] == bfile)):
1186 gp = gitpatches.pop()[2]
1186 gp = gitpatches.pop()[2]
1187 if x.startswith('GIT binary patch'):
1187 if x.startswith('GIT binary patch'):
1188 h = binhunk(lr)
1188 h = binhunk(lr)
1189 else:
1189 else:
1190 if context is None and x.startswith('***************'):
1190 if context is None and x.startswith('***************'):
1191 context = True
1191 context = True
1192 h = hunk(x, hunknum + 1, lr, context)
1192 h = hunk(x, hunknum + 1, lr, context)
1193 hunknum += 1
1193 hunknum += 1
1194 if emitfile:
1194 if emitfile:
1195 emitfile = False
1195 emitfile = False
1196 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1196 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1197 yield 'hunk', h
1197 yield 'hunk', h
1198 elif x.startswith('diff --git'):
1198 elif x.startswith('diff --git'):
1199 m = gitre.match(x)
1199 m = gitre.match(x)
1200 if not m:
1200 if not m:
1201 continue
1201 continue
1202 if not gitpatches:
1202 if not gitpatches:
1203 # scan whole input for git metadata
1203 # scan whole input for git metadata
1204 gitpatches = [('a/' + gp.path, 'b/' + gp.path, gp) for gp
1204 gitpatches = [('a/' + gp.path, 'b/' + gp.path, gp) for gp
1205 in scangitpatch(lr, x)]
1205 in scangitpatch(lr, x)]
1206 yield 'git', [g[2].copy() for g in gitpatches
1206 yield 'git', [g[2].copy() for g in gitpatches
1207 if g[2].op in ('COPY', 'RENAME')]
1207 if g[2].op in ('COPY', 'RENAME')]
1208 gitpatches.reverse()
1208 gitpatches.reverse()
1209 afile = 'a/' + m.group(1)
1209 afile = 'a/' + m.group(1)
1210 bfile = 'b/' + m.group(2)
1210 bfile = 'b/' + m.group(2)
1211 while afile != gitpatches[-1][0] and bfile != gitpatches[-1][1]:
1211 while afile != gitpatches[-1][0] and bfile != gitpatches[-1][1]:
1212 gp = gitpatches.pop()[2]
1212 gp = gitpatches.pop()[2]
1213 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1213 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1214 gp = gitpatches[-1][2]
1214 gp = gitpatches[-1][2]
1215 # copy/rename + modify should modify target, not source
1215 # copy/rename + modify should modify target, not source
1216 if gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD') or gp.mode:
1216 if gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD') or gp.mode:
1217 afile = bfile
1217 afile = bfile
1218 newfile = True
1218 newfile = True
1219 elif x.startswith('---'):
1219 elif x.startswith('---'):
1220 # check for a unified diff
1220 # check for a unified diff
1221 l2 = lr.readline()
1221 l2 = lr.readline()
1222 if not l2.startswith('+++'):
1222 if not l2.startswith('+++'):
1223 lr.push(l2)
1223 lr.push(l2)
1224 continue
1224 continue
1225 newfile = True
1225 newfile = True
1226 context = False
1226 context = False
1227 afile = parsefilename(x)
1227 afile = parsefilename(x)
1228 bfile = parsefilename(l2)
1228 bfile = parsefilename(l2)
1229 elif x.startswith('***'):
1229 elif x.startswith('***'):
1230 # check for a context diff
1230 # check for a context diff
1231 l2 = lr.readline()
1231 l2 = lr.readline()
1232 if not l2.startswith('---'):
1232 if not l2.startswith('---'):
1233 lr.push(l2)
1233 lr.push(l2)
1234 continue
1234 continue
1235 l3 = lr.readline()
1235 l3 = lr.readline()
1236 lr.push(l3)
1236 lr.push(l3)
1237 if not l3.startswith("***************"):
1237 if not l3.startswith("***************"):
1238 lr.push(l2)
1238 lr.push(l2)
1239 continue
1239 continue
1240 newfile = True
1240 newfile = True
1241 context = True
1241 context = True
1242 afile = parsefilename(x)
1242 afile = parsefilename(x)
1243 bfile = parsefilename(l2)
1243 bfile = parsefilename(l2)
1244
1244
1245 if newfile:
1245 if newfile:
1246 newfile = False
1246 newfile = False
1247 emitfile = True
1247 emitfile = True
1248 state = BFILE
1248 state = BFILE
1249 hunknum = 0
1249 hunknum = 0
1250
1250
1251 while gitpatches:
1251 while gitpatches:
1252 gp = gitpatches.pop()[2]
1252 gp = gitpatches.pop()[2]
1253 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1253 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1254
1254
1255 def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
1255 def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
1256 """Reads a patch from fp and tries to apply it.
1256 """Reads a patch from fp and tries to apply it.
1257
1257
1258 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1258 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1259 there was any fuzz.
1259 there was any fuzz.
1260
1260
1261 If 'eolmode' is 'strict', the patch content and patched file are
1261 If 'eolmode' is 'strict', the patch content and patched file are
1262 read in binary mode. Otherwise, line endings are ignored when
1262 read in binary mode. Otherwise, line endings are ignored when
1263 patching then normalized according to 'eolmode'.
1263 patching then normalized according to 'eolmode'.
1264 """
1264 """
1265 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1265 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1266 eolmode=eolmode)
1266 eolmode=eolmode)
1267
1267
1268 def _applydiff(ui, fp, patcher, backend, store, strip=1,
1268 def _applydiff(ui, fp, patcher, backend, store, strip=1,
1269 eolmode='strict'):
1269 eolmode='strict'):
1270
1270
1271 def pstrip(p):
1271 def pstrip(p):
1272 return pathstrip(p, strip - 1)[1]
1272 return pathstrip(p, strip - 1)[1]
1273
1273
1274 rejects = 0
1274 rejects = 0
1275 err = 0
1275 err = 0
1276 current_file = None
1276 current_file = None
1277
1277
1278 for state, values in iterhunks(fp):
1278 for state, values in iterhunks(fp):
1279 if state == 'hunk':
1279 if state == 'hunk':
1280 if not current_file:
1280 if not current_file:
1281 continue
1281 continue
1282 ret = current_file.apply(values)
1282 ret = current_file.apply(values)
1283 if ret > 0:
1283 if ret > 0:
1284 err = 1
1284 err = 1
1285 elif state == 'file':
1285 elif state == 'file':
1286 if current_file:
1286 if current_file:
1287 rejects += current_file.close()
1287 rejects += current_file.close()
1288 current_file = None
1288 current_file = None
1289 afile, bfile, first_hunk, gp = values
1289 afile, bfile, first_hunk, gp = values
1290 if gp:
1290 if gp:
1291 path = pstrip(gp.path)
1291 path = pstrip(gp.path)
1292 gp.path = pstrip(gp.path)
1292 gp.path = pstrip(gp.path)
1293 if gp.oldpath:
1293 if gp.oldpath:
1294 gp.oldpath = pstrip(gp.oldpath)
1294 gp.oldpath = pstrip(gp.oldpath)
1295 else:
1295 else:
1296 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1296 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1297 if gp.op == 'RENAME':
1297 if gp.op == 'RENAME':
1298 backend.unlink(gp.oldpath)
1298 backend.unlink(gp.oldpath)
1299 if not first_hunk:
1299 if not first_hunk:
1300 if gp.op == 'DELETE':
1300 if gp.op == 'DELETE':
1301 backend.unlink(gp.path)
1301 backend.unlink(gp.path)
1302 continue
1302 continue
1303 data, mode = None, None
1303 data, mode = None, None
1304 if gp.op in ('RENAME', 'COPY'):
1304 if gp.op in ('RENAME', 'COPY'):
1305 data, mode = store.getfile(gp.oldpath)[:2]
1305 data, mode = store.getfile(gp.oldpath)[:2]
1306 if gp.mode:
1306 if gp.mode:
1307 mode = gp.mode
1307 mode = gp.mode
1308 if gp.op == 'ADD':
1308 if gp.op == 'ADD':
1309 # Added files without content have no hunk and
1309 # Added files without content have no hunk and
1310 # must be created
1310 # must be created
1311 data = ''
1311 data = ''
1312 if data or mode:
1312 if data or mode:
1313 if (gp.op in ('ADD', 'RENAME', 'COPY')
1313 if (gp.op in ('ADD', 'RENAME', 'COPY')
1314 and backend.exists(gp.path)):
1314 and backend.exists(gp.path)):
1315 raise PatchError(_("cannot create %s: destination "
1315 raise PatchError(_("cannot create %s: destination "
1316 "already exists") % gp.path)
1316 "already exists") % gp.path)
1317 backend.setfile(gp.path, data, mode, gp.oldpath)
1317 backend.setfile(gp.path, data, mode, gp.oldpath)
1318 continue
1318 continue
1319 try:
1319 try:
1320 current_file = patcher(ui, gp, backend, store,
1320 current_file = patcher(ui, gp, backend, store,
1321 eolmode=eolmode)
1321 eolmode=eolmode)
1322 except PatchError, inst:
1322 except PatchError, inst:
1323 ui.warn(str(inst) + '\n')
1323 ui.warn(str(inst) + '\n')
1324 current_file = None
1324 current_file = None
1325 rejects += 1
1325 rejects += 1
1326 continue
1326 continue
1327 elif state == 'git':
1327 elif state == 'git':
1328 for gp in values:
1328 for gp in values:
1329 path = pstrip(gp.oldpath)
1329 path = pstrip(gp.oldpath)
1330 data, mode = backend.getfile(path)
1330 data, mode = backend.getfile(path)
1331 store.setfile(path, data, mode)
1331 store.setfile(path, data, mode)
1332 else:
1332 else:
1333 raise util.Abort(_('unsupported parser state: %s') % state)
1333 raise util.Abort(_('unsupported parser state: %s') % state)
1334
1334
1335 if current_file:
1335 if current_file:
1336 rejects += current_file.close()
1336 rejects += current_file.close()
1337
1337
1338 if rejects:
1338 if rejects:
1339 return -1
1339 return -1
1340 return err
1340 return err
1341
1341
1342 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1342 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1343 similarity):
1343 similarity):
1344 """use <patcher> to apply <patchname> to the working directory.
1344 """use <patcher> to apply <patchname> to the working directory.
1345 returns whether patch was applied with fuzz factor."""
1345 returns whether patch was applied with fuzz factor."""
1346
1346
1347 fuzz = False
1347 fuzz = False
1348 args = []
1348 args = []
1349 cwd = repo.root
1349 cwd = repo.root
1350 if cwd:
1350 if cwd:
1351 args.append('-d %s' % util.shellquote(cwd))
1351 args.append('-d %s' % util.shellquote(cwd))
1352 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1352 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1353 util.shellquote(patchname)))
1353 util.shellquote(patchname)))
1354 try:
1354 try:
1355 for line in fp:
1355 for line in fp:
1356 line = line.rstrip()
1356 line = line.rstrip()
1357 ui.note(line + '\n')
1357 ui.note(line + '\n')
1358 if line.startswith('patching file '):
1358 if line.startswith('patching file '):
1359 pf = util.parsepatchoutput(line)
1359 pf = util.parsepatchoutput(line)
1360 printed_file = False
1360 printed_file = False
1361 files.add(pf)
1361 files.add(pf)
1362 elif line.find('with fuzz') >= 0:
1362 elif line.find('with fuzz') >= 0:
1363 fuzz = True
1363 fuzz = True
1364 if not printed_file:
1364 if not printed_file:
1365 ui.warn(pf + '\n')
1365 ui.warn(pf + '\n')
1366 printed_file = True
1366 printed_file = True
1367 ui.warn(line + '\n')
1367 ui.warn(line + '\n')
1368 elif line.find('saving rejects to file') >= 0:
1368 elif line.find('saving rejects to file') >= 0:
1369 ui.warn(line + '\n')
1369 ui.warn(line + '\n')
1370 elif line.find('FAILED') >= 0:
1370 elif line.find('FAILED') >= 0:
1371 if not printed_file:
1371 if not printed_file:
1372 ui.warn(pf + '\n')
1372 ui.warn(pf + '\n')
1373 printed_file = True
1373 printed_file = True
1374 ui.warn(line + '\n')
1374 ui.warn(line + '\n')
1375 finally:
1375 finally:
1376 if files:
1376 if files:
1377 cfiles = list(files)
1377 cfiles = list(files)
1378 cwd = repo.getcwd()
1378 cwd = repo.getcwd()
1379 if cwd:
1379 if cwd:
1380 cfiles = [util.pathto(repo.root, cwd, f)
1380 cfiles = [util.pathto(repo.root, cwd, f)
1381 for f in cfiles]
1381 for f in cfiles]
1382 scmutil.addremove(repo, cfiles, similarity=similarity)
1382 scmutil.addremove(repo, cfiles, similarity=similarity)
1383 code = fp.close()
1383 code = fp.close()
1384 if code:
1384 if code:
1385 raise PatchError(_("patch command failed: %s") %
1385 raise PatchError(_("patch command failed: %s") %
1386 util.explainexit(code)[0])
1386 util.explainexit(code)[0])
1387 return fuzz
1387 return fuzz
1388
1388
1389 def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
1389 def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
1390 if files is None:
1390 if files is None:
1391 files = set()
1391 files = set()
1392 if eolmode is None:
1392 if eolmode is None:
1393 eolmode = ui.config('patch', 'eol', 'strict')
1393 eolmode = ui.config('patch', 'eol', 'strict')
1394 if eolmode.lower() not in eolmodes:
1394 if eolmode.lower() not in eolmodes:
1395 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1395 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1396 eolmode = eolmode.lower()
1396 eolmode = eolmode.lower()
1397
1397
1398 store = filestore()
1398 store = filestore()
1399 try:
1399 try:
1400 fp = open(patchobj, 'rb')
1400 fp = open(patchobj, 'rb')
1401 except TypeError:
1401 except TypeError:
1402 fp = patchobj
1402 fp = patchobj
1403 try:
1403 try:
1404 ret = applydiff(ui, fp, backend, store, strip=strip,
1404 ret = applydiff(ui, fp, backend, store, strip=strip,
1405 eolmode=eolmode)
1405 eolmode=eolmode)
1406 finally:
1406 finally:
1407 if fp != patchobj:
1407 if fp != patchobj:
1408 fp.close()
1408 fp.close()
1409 files.update(backend.close())
1409 files.update(backend.close())
1410 store.close()
1410 store.close()
1411 if ret < 0:
1411 if ret < 0:
1412 raise PatchError(_('patch failed to apply'))
1412 raise PatchError(_('patch failed to apply'))
1413 return ret > 0
1413 return ret > 0
1414
1414
1415 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1415 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1416 similarity=0):
1416 similarity=0):
1417 """use builtin patch to apply <patchobj> to the working directory.
1417 """use builtin patch to apply <patchobj> to the working directory.
1418 returns whether patch was applied with fuzz factor."""
1418 returns whether patch was applied with fuzz factor."""
1419 backend = workingbackend(ui, repo, similarity)
1419 backend = workingbackend(ui, repo, similarity)
1420 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1420 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1421
1421
1422 def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
1422 def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
1423 eolmode='strict'):
1423 eolmode='strict'):
1424 backend = repobackend(ui, repo, ctx, store)
1424 backend = repobackend(ui, repo, ctx, store)
1425 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1425 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1426
1426
1427 def makememctx(repo, parents, text, user, date, branch, files, store,
1427 def makememctx(repo, parents, text, user, date, branch, files, store,
1428 editor=None):
1428 editor=None):
1429 def getfilectx(repo, memctx, path):
1429 def getfilectx(repo, memctx, path):
1430 data, (islink, isexec), copied = store.getfile(path)
1430 data, (islink, isexec), copied = store.getfile(path)
1431 return context.memfilectx(path, data, islink=islink, isexec=isexec,
1431 return context.memfilectx(path, data, islink=islink, isexec=isexec,
1432 copied=copied)
1432 copied=copied)
1433 extra = {}
1433 extra = {}
1434 if branch:
1434 if branch:
1435 extra['branch'] = encoding.fromlocal(branch)
1435 extra['branch'] = encoding.fromlocal(branch)
1436 ctx = context.memctx(repo, parents, text, files, getfilectx, user,
1436 ctx = context.memctx(repo, parents, text, files, getfilectx, user,
1437 date, extra)
1437 date, extra)
1438 if editor:
1438 if editor:
1439 ctx._text = editor(repo, ctx, [])
1439 ctx._text = editor(repo, ctx, [])
1440 return ctx
1440 return ctx
1441
1441
1442 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1442 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1443 similarity=0):
1443 similarity=0):
1444 """Apply <patchname> to the working directory.
1444 """Apply <patchname> to the working directory.
1445
1445
1446 'eolmode' specifies how end of lines should be handled. It can be:
1446 'eolmode' specifies how end of lines should be handled. It can be:
1447 - 'strict': inputs are read in binary mode, EOLs are preserved
1447 - 'strict': inputs are read in binary mode, EOLs are preserved
1448 - 'crlf': EOLs are ignored when patching and reset to CRLF
1448 - 'crlf': EOLs are ignored when patching and reset to CRLF
1449 - 'lf': EOLs are ignored when patching and reset to LF
1449 - 'lf': EOLs are ignored when patching and reset to LF
1450 - None: get it from user settings, default to 'strict'
1450 - None: get it from user settings, default to 'strict'
1451 'eolmode' is ignored when using an external patcher program.
1451 'eolmode' is ignored when using an external patcher program.
1452
1452
1453 Returns whether patch was applied with fuzz factor.
1453 Returns whether patch was applied with fuzz factor.
1454 """
1454 """
1455 patcher = ui.config('ui', 'patch')
1455 patcher = ui.config('ui', 'patch')
1456 if files is None:
1456 if files is None:
1457 files = set()
1457 files = set()
1458 try:
1458 try:
1459 if patcher:
1459 if patcher:
1460 return _externalpatch(ui, repo, patcher, patchname, strip,
1460 return _externalpatch(ui, repo, patcher, patchname, strip,
1461 files, similarity)
1461 files, similarity)
1462 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1462 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1463 similarity)
1463 similarity)
1464 except PatchError, err:
1464 except PatchError, err:
1465 raise util.Abort(str(err))
1465 raise util.Abort(str(err))
1466
1466
1467 def changedfiles(ui, repo, patchpath, strip=1):
1467 def changedfiles(ui, repo, patchpath, strip=1):
1468 backend = fsbackend(ui, repo.root)
1468 backend = fsbackend(ui, repo.root)
1469 fp = open(patchpath, 'rb')
1469 fp = open(patchpath, 'rb')
1470 try:
1470 try:
1471 changed = set()
1471 changed = set()
1472 for state, values in iterhunks(fp):
1472 for state, values in iterhunks(fp):
1473 if state == 'file':
1473 if state == 'file':
1474 afile, bfile, first_hunk, gp = values
1474 afile, bfile, first_hunk, gp = values
1475 if gp:
1475 if gp:
1476 gp.path = pathstrip(gp.path, strip - 1)[1]
1476 gp.path = pathstrip(gp.path, strip - 1)[1]
1477 if gp.oldpath:
1477 if gp.oldpath:
1478 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1478 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1479 else:
1479 else:
1480 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1480 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1481 changed.add(gp.path)
1481 changed.add(gp.path)
1482 if gp.op == 'RENAME':
1482 if gp.op == 'RENAME':
1483 changed.add(gp.oldpath)
1483 changed.add(gp.oldpath)
1484 elif state not in ('hunk', 'git'):
1484 elif state not in ('hunk', 'git'):
1485 raise util.Abort(_('unsupported parser state: %s') % state)
1485 raise util.Abort(_('unsupported parser state: %s') % state)
1486 return changed
1486 return changed
1487 finally:
1487 finally:
1488 fp.close()
1488 fp.close()
1489
1489
1490 def b85diff(to, tn):
1490 def b85diff(to, tn):
1491 '''print base85-encoded binary diff'''
1491 '''print base85-encoded binary diff'''
1492 def gitindex(text):
1492 def gitindex(text):
1493 if not text:
1493 if not text:
1494 return hex(nullid)
1494 return hex(nullid)
1495 l = len(text)
1495 l = len(text)
1496 s = util.sha1('blob %d\0' % l)
1496 s = util.sha1('blob %d\0' % l)
1497 s.update(text)
1497 s.update(text)
1498 return s.hexdigest()
1498 return s.hexdigest()
1499
1499
1500 def fmtline(line):
1500 def fmtline(line):
1501 l = len(line)
1501 l = len(line)
1502 if l <= 26:
1502 if l <= 26:
1503 l = chr(ord('A') + l - 1)
1503 l = chr(ord('A') + l - 1)
1504 else:
1504 else:
1505 l = chr(l - 26 + ord('a') - 1)
1505 l = chr(l - 26 + ord('a') - 1)
1506 return '%c%s\n' % (l, base85.b85encode(line, True))
1506 return '%c%s\n' % (l, base85.b85encode(line, True))
1507
1507
1508 def chunk(text, csize=52):
1508 def chunk(text, csize=52):
1509 l = len(text)
1509 l = len(text)
1510 i = 0
1510 i = 0
1511 while i < l:
1511 while i < l:
1512 yield text[i:i + csize]
1512 yield text[i:i + csize]
1513 i += csize
1513 i += csize
1514
1514
1515 tohash = gitindex(to)
1515 tohash = gitindex(to)
1516 tnhash = gitindex(tn)
1516 tnhash = gitindex(tn)
1517 if tohash == tnhash:
1517 if tohash == tnhash:
1518 return ""
1518 return ""
1519
1519
1520 # TODO: deltas
1520 # TODO: deltas
1521 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1521 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1522 (tohash, tnhash, len(tn))]
1522 (tohash, tnhash, len(tn))]
1523 for l in chunk(zlib.compress(tn)):
1523 for l in chunk(zlib.compress(tn)):
1524 ret.append(fmtline(l))
1524 ret.append(fmtline(l))
1525 ret.append('\n')
1525 ret.append('\n')
1526 return ''.join(ret)
1526 return ''.join(ret)
1527
1527
1528 class GitDiffRequired(Exception):
1528 class GitDiffRequired(Exception):
1529 pass
1529 pass
1530
1530
1531 def diffopts(ui, opts=None, untrusted=False):
1531 def diffopts(ui, opts=None, untrusted=False):
1532 def get(key, name=None, getter=ui.configbool):
1532 def get(key, name=None, getter=ui.configbool):
1533 return ((opts and opts.get(key)) or
1533 return ((opts and opts.get(key)) or
1534 getter('diff', name or key, None, untrusted=untrusted))
1534 getter('diff', name or key, None, untrusted=untrusted))
1535 return mdiff.diffopts(
1535 return mdiff.diffopts(
1536 text=opts and opts.get('text'),
1536 text=opts and opts.get('text'),
1537 git=get('git'),
1537 git=get('git'),
1538 nodates=get('nodates'),
1538 nodates=get('nodates'),
1539 showfunc=get('show_function', 'showfunc'),
1539 showfunc=get('show_function', 'showfunc'),
1540 ignorews=get('ignore_all_space', 'ignorews'),
1540 ignorews=get('ignore_all_space', 'ignorews'),
1541 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1541 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1542 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1542 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1543 context=get('unified', getter=ui.config))
1543 context=get('unified', getter=ui.config))
1544
1544
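# Editor's note: a sketch of the hgrc settings read by diffopts() above,
# via ui.configbool()/ui.config() on the [diff] section:
#   [diff]
#   git = True
#   showfunc = True
#   ignorews = False
#   unified = 5
# Values passed in 'opts' (for example, command-line flags) take
# precedence over the configuration file.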
1545 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1545 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1546 losedatafn=None, prefix=''):
1546 losedatafn=None, prefix=''):
1547 '''yields diff of changes to files between two nodes, or node and
1547 '''yields diff of changes to files between two nodes, or node and
1548 working directory.
1548 working directory.
1549
1549
1550 if node1 is None, use first dirstate parent instead.
1550 if node1 is None, use first dirstate parent instead.
1551 if node2 is None, compare node1 with working directory.
1551 if node2 is None, compare node1 with working directory.
1552
1552
1553 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1553 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1554 every time some change cannot be represented with the current
1554 every time some change cannot be represented with the current
1555 patch format. Return False to upgrade to git patch format, True to
1555 patch format. Return False to upgrade to git patch format, True to
1556 accept the loss or raise an exception to abort the diff. It is
1556 accept the loss or raise an exception to abort the diff. It is
1557 called with the name of current file being diffed as 'fn'. If set
1557 called with the name of current file being diffed as 'fn'. If set
1558 to None, patches will always be upgraded to git format when
1558 to None, patches will always be upgraded to git format when
1559 necessary.
1559 necessary.
1560
1560
1561 prefix is a filename prefix that is prepended to all filenames on
1561 prefix is a filename prefix that is prepended to all filenames on
1562 display (used for subrepos).
1562 display (used for subrepos).
1563 '''
1563 '''
1564
1564
1565 if opts is None:
1565 if opts is None:
1566 opts = mdiff.defaultopts
1566 opts = mdiff.defaultopts
1567
1567
1568 if not node1 and not node2:
1568 if not node1 and not node2:
1569 node1 = repo.dirstate.p1()
1569 node1 = repo.dirstate.p1()
1570
1570
1571 def lrugetfilectx():
1571 def lrugetfilectx():
1572 cache = {}
1572 cache = {}
1573 order = []
1573 order = []
1574 def getfilectx(f, ctx):
1574 def getfilectx(f, ctx):
1575 fctx = ctx.filectx(f, filelog=cache.get(f))
1575 fctx = ctx.filectx(f, filelog=cache.get(f))
1576 if f not in cache:
1576 if f not in cache:
1577 if len(cache) > 20:
1577 if len(cache) > 20:
1578 del cache[order.pop(0)]
1578 del cache[order.pop(0)]
1579 cache[f] = fctx.filelog()
1579 cache[f] = fctx.filelog()
1580 else:
1580 else:
1581 order.remove(f)
1581 order.remove(f)
1582 order.append(f)
1582 order.append(f)
1583 return fctx
1583 return fctx
1584 return getfilectx
1584 return getfilectx
1585 getfilectx = lrugetfilectx()
1585 getfilectx = lrugetfilectx()
1586
1586
1587 ctx1 = repo[node1]
1587 ctx1 = repo[node1]
1588 ctx2 = repo[node2]
1588 ctx2 = repo[node2]
1589
1589
1590 if not changes:
1590 if not changes:
1591 changes = repo.status(ctx1, ctx2, match=match)
1591 changes = repo.status(ctx1, ctx2, match=match)
1592 modified, added, removed = changes[:3]
1592 modified, added, removed = changes[:3]
1593
1593
1594 if not modified and not added and not removed:
1594 if not modified and not added and not removed:
1595 return []
1595 return []
1596
1596
1597 revs = None
1597 revs = None
1598 if not repo.ui.quiet:
1598 if not repo.ui.quiet:
1599 hexfunc = repo.ui.debugflag and hex or short
1599 hexfunc = repo.ui.debugflag and hex or short
1600 revs = [hexfunc(node) for node in [node1, node2] if node]
1600 revs = [hexfunc(node) for node in [node1, node2] if node]
1601
1601
1602 copy = {}
1602 copy = {}
1603 if opts.git or opts.upgrade:
1603 if opts.git or opts.upgrade:
1604 copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]
1604 copy = copies.copies(repo, ctx1, ctx2, repo[nullid])[0]
1605
1605
1606 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1606 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1607 modified, added, removed, copy, getfilectx, opts, losedata, prefix)
1607 modified, added, removed, copy, getfilectx, opts, losedata, prefix)
1608 if opts.upgrade and not opts.git:
1608 if opts.upgrade and not opts.git:
1609 try:
1609 try:
1610 def losedata(fn):
1610 def losedata(fn):
1611 if not losedatafn or not losedatafn(fn=fn):
1611 if not losedatafn or not losedatafn(fn=fn):
1612 raise GitDiffRequired()
1612 raise GitDiffRequired()
1613 # Buffer the whole output until we are sure it can be generated
1613 # Buffer the whole output until we are sure it can be generated
1614 return list(difffn(opts.copy(git=False), losedata))
1614 return list(difffn(opts.copy(git=False), losedata))
1615 except GitDiffRequired:
1615 except GitDiffRequired:
1616 return difffn(opts.copy(git=True), None)
1616 return difffn(opts.copy(git=True), None)
1617 else:
1617 else:
1618 return difffn(opts, None)
1618 return difffn(opts, None)
1619
1619
1620 def difflabel(func, *args, **kw):
1620 def difflabel(func, *args, **kw):
1621 '''yields 2-tuples of (output, label) based on the output of func()'''
1621 '''yields 2-tuples of (output, label) based on the output of func()'''
1622 prefixes = [('diff', 'diff.diffline'),
1622 headprefixes = [('diff', 'diff.diffline'),
1623 ('copy', 'diff.extended'),
1623 ('copy', 'diff.extended'),
1624 ('rename', 'diff.extended'),
1624 ('rename', 'diff.extended'),
1625 ('old', 'diff.extended'),
1625 ('old', 'diff.extended'),
1626 ('new', 'diff.extended'),
1626 ('new', 'diff.extended'),
1627 ('deleted', 'diff.extended'),
1627 ('deleted', 'diff.extended'),
1628 ('---', 'diff.file_a'),
1628 ('---', 'diff.file_a'),
1629 ('+++', 'diff.file_b'),
1629 ('+++', 'diff.file_b')]
1630 ('@@', 'diff.hunk'),
1630 textprefixes = [('@', 'diff.hunk'),
1631 ('-', 'diff.deleted'),
1631 ('-', 'diff.deleted'),
1632 ('+', 'diff.inserted')]
1632 ('+', 'diff.inserted')]
1633
1633 head = False
1634 for chunk in func(*args, **kw):
1634 for chunk in func(*args, **kw):
1635 lines = chunk.split('\n')
1635 lines = chunk.split('\n')
1636 for i, line in enumerate(lines):
1636 for i, line in enumerate(lines):
1637 if i != 0:
1637 if i != 0:
1638 yield ('\n', '')
1638 yield ('\n', '')
1639 if head:
1640 if line.startswith('@'):
1641 head = False
1642 else:
1643 if line and not line[0] in ' +-@':
1644 head = True
1639 stripline = line
1645 stripline = line
1640 if line and line[0] in '+-':
1646 if not head and line and line[0] in '+-':
1641 # highlight trailing whitespace, but only in changed lines
1647 # highlight trailing whitespace, but only in changed lines
1642 stripline = line.rstrip()
1648 stripline = line.rstrip()
1649 prefixes = textprefixes
1650 if head:
1651 prefixes = headprefixes
1643 for prefix, label in prefixes:
1652 for prefix, label in prefixes:
1644 if stripline.startswith(prefix):
1653 if stripline.startswith(prefix):
1645 yield (stripline, label)
1654 yield (stripline, label)
1646 break
1655 break
1647 else:
1656 else:
1648 yield (line, '')
1657 yield (line, '')
1649 if line != stripline:
1658 if line != stripline:
1650 yield (line[len(stripline):], 'diff.trailingwhitespace')
1659 yield (line[len(stripline):], 'diff.trailingwhitespace')
1651
1660
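# Editor's note: with the change above, difflabel() tracks whether it is
# inside the header of a file diff ('head') so that header lines keep
# header labels and only hunk lines get inserted/deleted labels. A
# sketch of the resulting (output, label) pairs:
#   'diff --git a/f b/f' -> 'diff.diffline'
#   '--- a/f'            -> 'diff.file_a'
#   '+++ b/f'            -> 'diff.file_b'
#   '@@ -1,2 +1,2 @@'    -> 'diff.hunk'
#   '+added line'        -> 'diff.inserted'
#   '-removed line'      -> 'diff.deleted'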
1652 def diffui(*args, **kw):
1661 def diffui(*args, **kw):
1653 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1662 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1654 return difflabel(diff, *args, **kw)
1663 return difflabel(diff, *args, **kw)
1655
1664
1656
1665
1657 def _addmodehdr(header, omode, nmode):
1666 def _addmodehdr(header, omode, nmode):
1658 if omode != nmode:
1667 if omode != nmode:
1659 header.append('old mode %s\n' % omode)
1668 header.append('old mode %s\n' % omode)
1660 header.append('new mode %s\n' % nmode)
1669 header.append('new mode %s\n' % nmode)
1661
1670
1662 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1671 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1663 copy, getfilectx, opts, losedatafn, prefix):
1672 copy, getfilectx, opts, losedatafn, prefix):
1664
1673
1665 def join(f):
1674 def join(f):
1666 return os.path.join(prefix, f)
1675 return os.path.join(prefix, f)
1667
1676
1668 date1 = util.datestr(ctx1.date())
1677 date1 = util.datestr(ctx1.date())
1669 man1 = ctx1.manifest()
1678 man1 = ctx1.manifest()
1670
1679
1671 gone = set()
1680 gone = set()
1672 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1681 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1673
1682
1674 copyto = dict([(v, k) for k, v in copy.items()])
1683 copyto = dict([(v, k) for k, v in copy.items()])
1675
1684
1676 if opts.git:
1685 if opts.git:
1677 revs = None
1686 revs = None
1678
1687
1679 for f in sorted(modified + added + removed):
1688 for f in sorted(modified + added + removed):
1680 to = None
1689 to = None
1681 tn = None
1690 tn = None
1682 dodiff = True
1691 dodiff = True
1683 header = []
1692 header = []
1684 if f in man1:
1693 if f in man1:
1685 to = getfilectx(f, ctx1).data()
1694 to = getfilectx(f, ctx1).data()
1686 if f not in removed:
1695 if f not in removed:
1687 tn = getfilectx(f, ctx2).data()
1696 tn = getfilectx(f, ctx2).data()
1688 a, b = f, f
1697 a, b = f, f
1689 if opts.git or losedatafn:
1698 if opts.git or losedatafn:
1690 if f in added:
1699 if f in added:
1691 mode = gitmode[ctx2.flags(f)]
1700 mode = gitmode[ctx2.flags(f)]
1692 if f in copy or f in copyto:
1701 if f in copy or f in copyto:
1693 if opts.git:
1702 if opts.git:
1694 if f in copy:
1703 if f in copy:
1695 a = copy[f]
1704 a = copy[f]
1696 else:
1705 else:
1697 a = copyto[f]
1706 a = copyto[f]
1698 omode = gitmode[man1.flags(a)]
1707 omode = gitmode[man1.flags(a)]
1699 _addmodehdr(header, omode, mode)
1708 _addmodehdr(header, omode, mode)
1700 if a in removed and a not in gone:
1709 if a in removed and a not in gone:
1701 op = 'rename'
1710 op = 'rename'
1702 gone.add(a)
1711 gone.add(a)
1703 else:
1712 else:
1704 op = 'copy'
1713 op = 'copy'
1705 header.append('%s from %s\n' % (op, join(a)))
1714 header.append('%s from %s\n' % (op, join(a)))
1706 header.append('%s to %s\n' % (op, join(f)))
1715 header.append('%s to %s\n' % (op, join(f)))
1707 to = getfilectx(a, ctx1).data()
1716 to = getfilectx(a, ctx1).data()
1708 else:
1717 else:
1709 losedatafn(f)
1718 losedatafn(f)
1710 else:
1719 else:
1711 if opts.git:
1720 if opts.git:
1712 header.append('new file mode %s\n' % mode)
1721 header.append('new file mode %s\n' % mode)
1713 elif ctx2.flags(f):
1722 elif ctx2.flags(f):
1714 losedatafn(f)
1723 losedatafn(f)
1715 # In theory, if tn was copied or renamed we should check
1724 # In theory, if tn was copied or renamed we should check
1716 # if the source is binary too but the copy record already
1725 # if the source is binary too but the copy record already
1717 # forces git mode.
1726 # forces git mode.
1718 if util.binary(tn):
1727 if util.binary(tn):
1719 if opts.git:
1728 if opts.git:
1720 dodiff = 'binary'
1729 dodiff = 'binary'
1721 else:
1730 else:
1722 losedatafn(f)
1731 losedatafn(f)
1723 if not opts.git and not tn:
1732 if not opts.git and not tn:
1724 # regular diffs cannot represent new empty file
1733 # regular diffs cannot represent new empty file
1725 losedatafn(f)
1734 losedatafn(f)
1726 elif f in removed:
1735 elif f in removed:
1727 if opts.git:
1736 if opts.git:
1728 # have we already reported a copy above?
1737 # have we already reported a copy above?
1729 if ((f in copy and copy[f] in added
1738 if ((f in copy and copy[f] in added
1730 and copyto[copy[f]] == f) or
1739 and copyto[copy[f]] == f) or
1731 (f in copyto and copyto[f] in added
1740 (f in copyto and copyto[f] in added
1732 and copy[copyto[f]] == f)):
1741 and copy[copyto[f]] == f)):
1733 dodiff = False
1742 dodiff = False
1734 else:
1743 else:
1735 header.append('deleted file mode %s\n' %
1744 header.append('deleted file mode %s\n' %
1736 gitmode[man1.flags(f)])
1745 gitmode[man1.flags(f)])
1737 elif not to or util.binary(to):
1746 elif not to or util.binary(to):
1738 # regular diffs cannot represent empty file deletion
1747 # regular diffs cannot represent empty file deletion
1739 losedatafn(f)
1748 losedatafn(f)
1740 else:
1749 else:
1741 oflag = man1.flags(f)
1750 oflag = man1.flags(f)
1742 nflag = ctx2.flags(f)
1751 nflag = ctx2.flags(f)
1743 binary = util.binary(to) or util.binary(tn)
1752 binary = util.binary(to) or util.binary(tn)
1744 if opts.git:
1753 if opts.git:
1745 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1754 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1746 if binary:
1755 if binary:
1747 dodiff = 'binary'
1756 dodiff = 'binary'
1748 elif binary or nflag != oflag:
1757 elif binary or nflag != oflag:
1749 losedatafn(f)
1758 losedatafn(f)
1750 if opts.git:
1759 if opts.git:
1751 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1760 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1752
1761
1753 if dodiff:
1762 if dodiff:
1754 if dodiff == 'binary':
1763 if dodiff == 'binary':
1755 text = b85diff(to, tn)
1764 text = b85diff(to, tn)
1756 else:
1765 else:
1757 text = mdiff.unidiff(to, date1,
1766 text = mdiff.unidiff(to, date1,
1758 # ctx2 date may be dynamic
1767 # ctx2 date may be dynamic
1759 tn, util.datestr(ctx2.date()),
1768 tn, util.datestr(ctx2.date()),
1760 join(a), join(b), revs, opts=opts)
1769 join(a), join(b), revs, opts=opts)
1761 if header and (text or len(header) > 1):
1770 if header and (text or len(header) > 1):
1762 yield ''.join(header)
1771 yield ''.join(header)
1763 if text:
1772 if text:
1764 yield text
1773 yield text
1765
1774
1766 def diffstatsum(stats):
1775 def diffstatsum(stats):
1767 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1776 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1768 for f, a, r, b in stats:
1777 for f, a, r, b in stats:
1769 maxfile = max(maxfile, encoding.colwidth(f))
1778 maxfile = max(maxfile, encoding.colwidth(f))
1770 maxtotal = max(maxtotal, a + r)
1779 maxtotal = max(maxtotal, a + r)
1771 addtotal += a
1780 addtotal += a
1772 removetotal += r
1781 removetotal += r
1773 binary = binary or b
1782 binary = binary or b
1774
1783
1775 return maxfile, maxtotal, addtotal, removetotal, binary
1784 return maxfile, maxtotal, addtotal, removetotal, binary
1776
1785
1777 def diffstatdata(lines):
1786 def diffstatdata(lines):
1778 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1787 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1779
1788
1780 results = []
1789 results = []
1781 filename, adds, removes = None, 0, 0
1790 filename, adds, removes = None, 0, 0
1782
1791
1783 def addresult():
1792 def addresult():
1784 if filename:
1793 if filename:
1785 isbinary = adds == 0 and removes == 0
1794 isbinary = adds == 0 and removes == 0
1786 results.append((filename, adds, removes, isbinary))
1795 results.append((filename, adds, removes, isbinary))
1787
1796
1788 for line in lines:
1797 for line in lines:
1789 if line.startswith('diff'):
1798 if line.startswith('diff'):
1790 addresult()
1799 addresult()
1791 # set numbers to 0 anyway when starting new file
1800 # set numbers to 0 anyway when starting new file
1792 adds, removes = 0, 0
1801 adds, removes = 0, 0
1793 if line.startswith('diff --git'):
1802 if line.startswith('diff --git'):
1794 filename = gitre.search(line).group(1)
1803 filename = gitre.search(line).group(1)
1795 elif line.startswith('diff -r'):
1804 elif line.startswith('diff -r'):
1796 # format: "diff -r ... -r ... filename"
1805 # format: "diff -r ... -r ... filename"
1797 filename = diffre.search(line).group(1)
1806 filename = diffre.search(line).group(1)
1798 elif line.startswith('+') and not line.startswith('+++'):
1807 elif line.startswith('+') and not line.startswith('+++'):
1799 adds += 1
1808 adds += 1
1800 elif line.startswith('-') and not line.startswith('---'):
1809 elif line.startswith('-') and not line.startswith('---'):
1801 removes += 1
1810 removes += 1
1802 addresult()
1811 addresult()
1803 return results
1812 return results
1804
1813
1805 def diffstat(lines, width=80, git=False):
1814 def diffstat(lines, width=80, git=False):
1806 output = []
1815 output = []
1807 stats = diffstatdata(lines)
1816 stats = diffstatdata(lines)
1808 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1817 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1809
1818
1810 countwidth = len(str(maxtotal))
1819 countwidth = len(str(maxtotal))
1811 if hasbinary and countwidth < 3:
1820 if hasbinary and countwidth < 3:
1812 countwidth = 3
1821 countwidth = 3
1813 graphwidth = width - countwidth - maxname - 6
1822 graphwidth = width - countwidth - maxname - 6
1814 if graphwidth < 10:
1823 if graphwidth < 10:
1815 graphwidth = 10
1824 graphwidth = 10
1816
1825
1817 def scale(i):
1826 def scale(i):
1818 if maxtotal <= graphwidth:
1827 if maxtotal <= graphwidth:
1819 return i
1828 return i
1820 # If diffstat runs out of room it doesn't print anything,
1829 # If diffstat runs out of room it doesn't print anything,
1821 # which isn't very useful, so always print at least one + or -
1830 # which isn't very useful, so always print at least one + or -
1822 # if there were at least some changes.
1831 # if there were at least some changes.
1823 return max(i * graphwidth // maxtotal, int(bool(i)))
1832 return max(i * graphwidth // maxtotal, int(bool(i)))
1824
1833
1825 for filename, adds, removes, isbinary in stats:
1834 for filename, adds, removes, isbinary in stats:
1826 if git and isbinary:
1835 if git and isbinary:
1827 count = 'Bin'
1836 count = 'Bin'
1828 else:
1837 else:
1829 count = adds + removes
1838 count = adds + removes
1830 pluses = '+' * scale(adds)
1839 pluses = '+' * scale(adds)
1831 minuses = '-' * scale(removes)
1840 minuses = '-' * scale(removes)
1832 output.append(' %s%s | %*s %s%s\n' %
1841 output.append(' %s%s | %*s %s%s\n' %
1833 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1842 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1834 countwidth, count, pluses, minuses))
1843 countwidth, count, pluses, minuses))
1835
1844
1836 if stats:
1845 if stats:
1837 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1846 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1838 % (len(stats), totaladds, totalremoves))
1847 % (len(stats), totaladds, totalremoves))
1839
1848
1840 return ''.join(output)
1849 return ''.join(output)
1841
1850
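# Editor's note: a sketch of the output produced by diffstat() above
# (filename and counts are invented for illustration):
#  mercurial/patch.py |  10 +++++-----
#  1 files changed, 5 insertions(+), 5 deletions(-)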
1842 def diffstatui(*args, **kw):
1851 def diffstatui(*args, **kw):
1843 '''like diffstat(), but yields 2-tuples of (output, label) for
1852 '''like diffstat(), but yields 2-tuples of (output, label) for
1844 ui.write()
1853 ui.write()
1845 '''
1854 '''
1846
1855
1847 for line in diffstat(*args, **kw).splitlines():
1856 for line in diffstat(*args, **kw).splitlines():
1848 if line and line[-1] in '+-':
1857 if line and line[-1] in '+-':
1849 name, graph = line.rsplit(' ', 1)
1858 name, graph = line.rsplit(' ', 1)
1850 yield (name + ' ', '')
1859 yield (name + ' ', '')
1851 m = re.search(r'\++', graph)
1860 m = re.search(r'\++', graph)
1852 if m:
1861 if m:
1853 yield (m.group(0), 'diffstat.inserted')
1862 yield (m.group(0), 'diffstat.inserted')
1854 m = re.search(r'-+', graph)
1863 m = re.search(r'-+', graph)
1855 if m:
1864 if m:
1856 yield (m.group(0), 'diffstat.deleted')
1865 yield (m.group(0), 'diffstat.deleted')
1857 else:
1866 else:
1858 yield (line, '')
1867 yield (line, '')
1859 yield ('\n', '')
1868 yield ('\n', '')