patch: make hunk.fuzzit() compute the fuzzed start locations...
Patrick Mezard
r16122:9ef3a4a2 stable
@@ -1,1871 +1,1877 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email.Parser, os, errno, re
9 import cStringIO, email.Parser, os, errno, re
10 import tempfile, zlib, shutil
10 import tempfile, zlib, shutil
11
11
12 from i18n import _
12 from i18n import _
13 from node import hex, nullid, short
13 from node import hex, nullid, short
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
15 import context
15 import context
16
16
17 gitre = re.compile('diff --git a/(.*) b/(.*)')
17 gitre = re.compile('diff --git a/(.*) b/(.*)')
18
18
19 class PatchError(Exception):
19 class PatchError(Exception):
20 pass
20 pass
21
21
22
22
23 # public functions
23 # public functions
24
24
25 def split(stream):
25 def split(stream):
26 '''return an iterator of individual patches from a stream'''
26 '''return an iterator of individual patches from a stream'''
27 def isheader(line, inheader):
27 def isheader(line, inheader):
28 if inheader and line[0] in (' ', '\t'):
28 if inheader and line[0] in (' ', '\t'):
29 # continuation
29 # continuation
30 return True
30 return True
31 if line[0] in (' ', '-', '+'):
31 if line[0] in (' ', '-', '+'):
32 # diff line - don't check for header pattern in there
32 # diff line - don't check for header pattern in there
33 return False
33 return False
34 l = line.split(': ', 1)
34 l = line.split(': ', 1)
35 return len(l) == 2 and ' ' not in l[0]
35 return len(l) == 2 and ' ' not in l[0]
36
36
37 def chunk(lines):
37 def chunk(lines):
38 return cStringIO.StringIO(''.join(lines))
38 return cStringIO.StringIO(''.join(lines))
39
39
40 def hgsplit(stream, cur):
40 def hgsplit(stream, cur):
41 inheader = True
41 inheader = True
42
42
43 for line in stream:
43 for line in stream:
44 if not line.strip():
44 if not line.strip():
45 inheader = False
45 inheader = False
46 if not inheader and line.startswith('# HG changeset patch'):
46 if not inheader and line.startswith('# HG changeset patch'):
47 yield chunk(cur)
47 yield chunk(cur)
48 cur = []
48 cur = []
49 inheader = True
49 inheader = True
50
50
51 cur.append(line)
51 cur.append(line)
52
52
53 if cur:
53 if cur:
54 yield chunk(cur)
54 yield chunk(cur)
55
55
56 def mboxsplit(stream, cur):
56 def mboxsplit(stream, cur):
57 for line in stream:
57 for line in stream:
58 if line.startswith('From '):
58 if line.startswith('From '):
59 for c in split(chunk(cur[1:])):
59 for c in split(chunk(cur[1:])):
60 yield c
60 yield c
61 cur = []
61 cur = []
62
62
63 cur.append(line)
63 cur.append(line)
64
64
65 if cur:
65 if cur:
66 for c in split(chunk(cur[1:])):
66 for c in split(chunk(cur[1:])):
67 yield c
67 yield c
68
68
69 def mimesplit(stream, cur):
69 def mimesplit(stream, cur):
70 def msgfp(m):
70 def msgfp(m):
71 fp = cStringIO.StringIO()
71 fp = cStringIO.StringIO()
72 g = email.Generator.Generator(fp, mangle_from_=False)
72 g = email.Generator.Generator(fp, mangle_from_=False)
73 g.flatten(m)
73 g.flatten(m)
74 fp.seek(0)
74 fp.seek(0)
75 return fp
75 return fp
76
76
77 for line in stream:
77 for line in stream:
78 cur.append(line)
78 cur.append(line)
79 c = chunk(cur)
79 c = chunk(cur)
80
80
81 m = email.Parser.Parser().parse(c)
81 m = email.Parser.Parser().parse(c)
82 if not m.is_multipart():
82 if not m.is_multipart():
83 yield msgfp(m)
83 yield msgfp(m)
84 else:
84 else:
85 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
85 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
86 for part in m.walk():
86 for part in m.walk():
87 ct = part.get_content_type()
87 ct = part.get_content_type()
88 if ct not in ok_types:
88 if ct not in ok_types:
89 continue
89 continue
90 yield msgfp(part)
90 yield msgfp(part)
91
91
92 def headersplit(stream, cur):
92 def headersplit(stream, cur):
93 inheader = False
93 inheader = False
94
94
95 for line in stream:
95 for line in stream:
96 if not inheader and isheader(line, inheader):
96 if not inheader and isheader(line, inheader):
97 yield chunk(cur)
97 yield chunk(cur)
98 cur = []
98 cur = []
99 inheader = True
99 inheader = True
100 if inheader and not isheader(line, inheader):
100 if inheader and not isheader(line, inheader):
101 inheader = False
101 inheader = False
102
102
103 cur.append(line)
103 cur.append(line)
104
104
105 if cur:
105 if cur:
106 yield chunk(cur)
106 yield chunk(cur)
107
107
108 def remainder(cur):
108 def remainder(cur):
109 yield chunk(cur)
109 yield chunk(cur)
110
110
111 class fiter(object):
111 class fiter(object):
112 def __init__(self, fp):
112 def __init__(self, fp):
113 self.fp = fp
113 self.fp = fp
114
114
115 def __iter__(self):
115 def __iter__(self):
116 return self
116 return self
117
117
118 def next(self):
118 def next(self):
119 l = self.fp.readline()
119 l = self.fp.readline()
120 if not l:
120 if not l:
121 raise StopIteration
121 raise StopIteration
122 return l
122 return l
123
123
124 inheader = False
124 inheader = False
125 cur = []
125 cur = []
126
126
127 mimeheaders = ['content-type']
127 mimeheaders = ['content-type']
128
128
129 if not util.safehasattr(stream, 'next'):
129 if not util.safehasattr(stream, 'next'):
130 # http responses, for example, have readline but not next
130 # http responses, for example, have readline but not next
131 stream = fiter(stream)
131 stream = fiter(stream)
132
132
133 for line in stream:
133 for line in stream:
134 cur.append(line)
134 cur.append(line)
135 if line.startswith('# HG changeset patch'):
135 if line.startswith('# HG changeset patch'):
136 return hgsplit(stream, cur)
136 return hgsplit(stream, cur)
137 elif line.startswith('From '):
137 elif line.startswith('From '):
138 return mboxsplit(stream, cur)
138 return mboxsplit(stream, cur)
139 elif isheader(line, inheader):
139 elif isheader(line, inheader):
140 inheader = True
140 inheader = True
141 if line.split(':', 1)[0].lower() in mimeheaders:
141 if line.split(':', 1)[0].lower() in mimeheaders:
142 # let email parser handle this
142 # let email parser handle this
143 return mimesplit(stream, cur)
143 return mimesplit(stream, cur)
144 elif line.startswith('--- ') and inheader:
144 elif line.startswith('--- ') and inheader:
145 # No evil headers seen by diff start, split by hand
145 # No evil headers seen by diff start, split by hand
146 return headersplit(stream, cur)
146 return headersplit(stream, cur)
147 # Not enough info, keep reading
147 # Not enough info, keep reading
148
148
149 # if we are here, we have a very plain patch
149 # if we are here, we have a very plain patch
150 return remainder(cur)
150 return remainder(cur)
151
151
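A quick usage sketch for split(); illustrative only, assuming it runs with this module's namespace available (cStringIO is already imported above), and the two-patch sample text is invented:

    _sample = ('# HG changeset patch\n# User alice\nfirst change\n\n'
               'diff -r 000000000000 a\n--- a/a\n+++ b/a\n@@ -0,0 +1,1 @@\n+a\n'
               '# HG changeset patch\n# User alice\nsecond change\n\n'
               'diff -r 000000000000 b\n--- a/b\n+++ b/b\n@@ -0,0 +1,1 @@\n+b\n')
    for i, piece in enumerate(split(cStringIO.StringIO(_sample))):
        # each piece is a file-like object holding one complete patch
        print('patch %d is %d bytes' % (i + 1, len(piece.getvalue())))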
152 def extract(ui, fileobj):
152 def extract(ui, fileobj):
153 '''extract patch from data read from fileobj.
153 '''extract patch from data read from fileobj.
154
154
155 patch can be a normal patch or contained in an email message.
155 patch can be a normal patch or contained in an email message.
156
156
157 return tuple (filename, message, user, date, branch, node, p1, p2).
157 return tuple (filename, message, user, date, branch, node, p1, p2).
158 Any item in the returned tuple can be None. If filename is None,
158 Any item in the returned tuple can be None. If filename is None,
159 fileobj did not contain a patch. Caller must unlink filename when done.'''
159 fileobj did not contain a patch. Caller must unlink filename when done.'''
160
160
161 # attempt to detect the start of a patch
161 # attempt to detect the start of a patch
162 # (this heuristic is borrowed from quilt)
162 # (this heuristic is borrowed from quilt)
163 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
163 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
164 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
164 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
165 r'---[ \t].*?^\+\+\+[ \t]|'
165 r'---[ \t].*?^\+\+\+[ \t]|'
166 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
166 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
167
167
168 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
168 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
169 tmpfp = os.fdopen(fd, 'w')
169 tmpfp = os.fdopen(fd, 'w')
170 try:
170 try:
171 msg = email.Parser.Parser().parse(fileobj)
171 msg = email.Parser.Parser().parse(fileobj)
172
172
173 subject = msg['Subject']
173 subject = msg['Subject']
174 user = msg['From']
174 user = msg['From']
175 if not subject and not user:
175 if not subject and not user:
176 # Not an email, restore parsed headers if any
176 # Not an email, restore parsed headers if any
177 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
177 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
178
178
179 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
179 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
180 # should try to parse msg['Date']
180 # should try to parse msg['Date']
181 date = None
181 date = None
182 nodeid = None
182 nodeid = None
183 branch = None
183 branch = None
184 parents = []
184 parents = []
185
185
186 if subject:
186 if subject:
187 if subject.startswith('[PATCH'):
187 if subject.startswith('[PATCH'):
188 pend = subject.find(']')
188 pend = subject.find(']')
189 if pend >= 0:
189 if pend >= 0:
190 subject = subject[pend + 1:].lstrip()
190 subject = subject[pend + 1:].lstrip()
191 subject = re.sub(r'\n[ \t]+', ' ', subject)
191 subject = re.sub(r'\n[ \t]+', ' ', subject)
192 ui.debug('Subject: %s\n' % subject)
192 ui.debug('Subject: %s\n' % subject)
193 if user:
193 if user:
194 ui.debug('From: %s\n' % user)
194 ui.debug('From: %s\n' % user)
195 diffs_seen = 0
195 diffs_seen = 0
196 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
196 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
197 message = ''
197 message = ''
198 for part in msg.walk():
198 for part in msg.walk():
199 content_type = part.get_content_type()
199 content_type = part.get_content_type()
200 ui.debug('Content-Type: %s\n' % content_type)
200 ui.debug('Content-Type: %s\n' % content_type)
201 if content_type not in ok_types:
201 if content_type not in ok_types:
202 continue
202 continue
203 payload = part.get_payload(decode=True)
203 payload = part.get_payload(decode=True)
204 m = diffre.search(payload)
204 m = diffre.search(payload)
205 if m:
205 if m:
206 hgpatch = False
206 hgpatch = False
207 hgpatchheader = False
207 hgpatchheader = False
208 ignoretext = False
208 ignoretext = False
209
209
210 ui.debug('found patch at byte %d\n' % m.start(0))
210 ui.debug('found patch at byte %d\n' % m.start(0))
211 diffs_seen += 1
211 diffs_seen += 1
212 cfp = cStringIO.StringIO()
212 cfp = cStringIO.StringIO()
213 for line in payload[:m.start(0)].splitlines():
213 for line in payload[:m.start(0)].splitlines():
214 if line.startswith('# HG changeset patch') and not hgpatch:
214 if line.startswith('# HG changeset patch') and not hgpatch:
215 ui.debug('patch generated by hg export\n')
215 ui.debug('patch generated by hg export\n')
216 hgpatch = True
216 hgpatch = True
217 hgpatchheader = True
217 hgpatchheader = True
218 # drop earlier commit message content
218 # drop earlier commit message content
219 cfp.seek(0)
219 cfp.seek(0)
220 cfp.truncate()
220 cfp.truncate()
221 subject = None
221 subject = None
222 elif hgpatchheader:
222 elif hgpatchheader:
223 if line.startswith('# User '):
223 if line.startswith('# User '):
224 user = line[7:]
224 user = line[7:]
225 ui.debug('From: %s\n' % user)
225 ui.debug('From: %s\n' % user)
226 elif line.startswith("# Date "):
226 elif line.startswith("# Date "):
227 date = line[7:]
227 date = line[7:]
228 elif line.startswith("# Branch "):
228 elif line.startswith("# Branch "):
229 branch = line[9:]
229 branch = line[9:]
230 elif line.startswith("# Node ID "):
230 elif line.startswith("# Node ID "):
231 nodeid = line[10:]
231 nodeid = line[10:]
232 elif line.startswith("# Parent "):
232 elif line.startswith("# Parent "):
233 parents.append(line[10:])
233 parents.append(line[10:])
234 elif not line.startswith("# "):
234 elif not line.startswith("# "):
235 hgpatchheader = False
235 hgpatchheader = False
236 elif line == '---' and gitsendmail:
236 elif line == '---' and gitsendmail:
237 ignoretext = True
237 ignoretext = True
238 if not hgpatchheader and not ignoretext:
238 if not hgpatchheader and not ignoretext:
239 cfp.write(line)
239 cfp.write(line)
240 cfp.write('\n')
240 cfp.write('\n')
241 message = cfp.getvalue()
241 message = cfp.getvalue()
242 if tmpfp:
242 if tmpfp:
243 tmpfp.write(payload)
243 tmpfp.write(payload)
244 if not payload.endswith('\n'):
244 if not payload.endswith('\n'):
245 tmpfp.write('\n')
245 tmpfp.write('\n')
246 elif not diffs_seen and message and content_type == 'text/plain':
246 elif not diffs_seen and message and content_type == 'text/plain':
247 message += '\n' + payload
247 message += '\n' + payload
248 except:
248 except:
249 tmpfp.close()
249 tmpfp.close()
250 os.unlink(tmpname)
250 os.unlink(tmpname)
251 raise
251 raise
252
252
253 if subject and not message.startswith(subject):
253 if subject and not message.startswith(subject):
254 message = '%s\n%s' % (subject, message)
254 message = '%s\n%s' % (subject, message)
255 tmpfp.close()
255 tmpfp.close()
256 if not diffs_seen:
256 if not diffs_seen:
257 os.unlink(tmpname)
257 os.unlink(tmpname)
258 return None, message, user, date, branch, None, None, None
258 return None, message, user, date, branch, None, None, None
259 p1 = parents and parents.pop(0) or None
259 p1 = parents and parents.pop(0) or None
260 p2 = parents and parents.pop(0) or None
260 p2 = parents and parents.pop(0) or None
261 return tmpname, message, user, date, branch, nodeid, p1, p2
261 return tmpname, message, user, date, branch, nodeid, p1, p2
262
262
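A hedged sketch of driving extract() from outside this module; the input file name is hypothetical, and per the docstring the caller must unlink the temporary file returned in the first slot of the tuple:

    from mercurial import ui as uimod, patch
    import os

    u = uimod.ui()
    fp = open('incoming.patch')     # hypothetical input file
    tmpname, message, user, date, branch, node, p1, p2 = patch.extract(u, fp)
    try:
        if tmpname is None:
            print('fileobj did not contain a patch')
        else:
            print('patch by %s dated %s' % (user, date))
    finally:
        if tmpname:
            os.unlink(tmpname)      # cleanup is left to the caller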
263 class patchmeta(object):
263 class patchmeta(object):
264 """Patched file metadata
264 """Patched file metadata
265
265
266 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
266 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
267 or COPY. 'path' is patched file path. 'oldpath' is set to the
267 or COPY. 'path' is patched file path. 'oldpath' is set to the
268 origin file when 'op' is either COPY or RENAME, None otherwise. If
268 origin file when 'op' is either COPY or RENAME, None otherwise. If
269 file mode is changed, 'mode' is a tuple (islink, isexec) where
269 file mode is changed, 'mode' is a tuple (islink, isexec) where
270 'islink' is True if the file is a symlink and 'isexec' is True if
270 'islink' is True if the file is a symlink and 'isexec' is True if
271 the file is executable. Otherwise, 'mode' is None.
271 the file is executable. Otherwise, 'mode' is None.
272 """
272 """
273 def __init__(self, path):
273 def __init__(self, path):
274 self.path = path
274 self.path = path
275 self.oldpath = None
275 self.oldpath = None
276 self.mode = None
276 self.mode = None
277 self.op = 'MODIFY'
277 self.op = 'MODIFY'
278 self.binary = False
278 self.binary = False
279
279
280 def setmode(self, mode):
280 def setmode(self, mode):
281 islink = mode & 020000
281 islink = mode & 020000
282 isexec = mode & 0100
282 isexec = mode & 0100
283 self.mode = (islink, isexec)
283 self.mode = (islink, isexec)
284
284
285 def copy(self):
285 def copy(self):
286 other = patchmeta(self.path)
286 other = patchmeta(self.path)
287 other.oldpath = self.oldpath
287 other.oldpath = self.oldpath
288 other.mode = self.mode
288 other.mode = self.mode
289 other.op = self.op
289 other.op = self.op
290 other.binary = self.binary
290 other.binary = self.binary
291 return other
291 return other
292
292
293 def __repr__(self):
293 def __repr__(self):
294 return "<patchmeta %s %r>" % (self.op, self.path)
294 return "<patchmeta %s %r>" % (self.op, self.path)
295
295
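The (islink, isexec) pair stored by patchmeta.setmode() comes from two mode bits; a standalone restatement of that test with a few sample POSIX modes:

    # same bit tests as patchmeta.setmode(): 020000 marks a symlink,
    # 0100 marks the owner-execute bit
    for mode in (0o100644, 0o100755, 0o120000):
        print('%o -> islink=%s isexec=%s'
              % (mode, bool(mode & 0o20000), bool(mode & 0o100)))
    # 100644 -> plain file, 100755 -> executable, 120000 -> symlink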
296 def readgitpatch(lr):
296 def readgitpatch(lr):
297 """extract git-style metadata about patches from <patchname>"""
297 """extract git-style metadata about patches from <patchname>"""
298
298
299 # Filter patch for git information
299 # Filter patch for git information
300 gp = None
300 gp = None
301 gitpatches = []
301 gitpatches = []
302 for line in lr:
302 for line in lr:
303 line = line.rstrip(' \r\n')
303 line = line.rstrip(' \r\n')
304 if line.startswith('diff --git'):
304 if line.startswith('diff --git'):
305 m = gitre.match(line)
305 m = gitre.match(line)
306 if m:
306 if m:
307 if gp:
307 if gp:
308 gitpatches.append(gp)
308 gitpatches.append(gp)
309 dst = m.group(2)
309 dst = m.group(2)
310 gp = patchmeta(dst)
310 gp = patchmeta(dst)
311 elif gp:
311 elif gp:
312 if line.startswith('--- '):
312 if line.startswith('--- '):
313 gitpatches.append(gp)
313 gitpatches.append(gp)
314 gp = None
314 gp = None
315 continue
315 continue
316 if line.startswith('rename from '):
316 if line.startswith('rename from '):
317 gp.op = 'RENAME'
317 gp.op = 'RENAME'
318 gp.oldpath = line[12:]
318 gp.oldpath = line[12:]
319 elif line.startswith('rename to '):
319 elif line.startswith('rename to '):
320 gp.path = line[10:]
320 gp.path = line[10:]
321 elif line.startswith('copy from '):
321 elif line.startswith('copy from '):
322 gp.op = 'COPY'
322 gp.op = 'COPY'
323 gp.oldpath = line[10:]
323 gp.oldpath = line[10:]
324 elif line.startswith('copy to '):
324 elif line.startswith('copy to '):
325 gp.path = line[8:]
325 gp.path = line[8:]
326 elif line.startswith('deleted file'):
326 elif line.startswith('deleted file'):
327 gp.op = 'DELETE'
327 gp.op = 'DELETE'
328 elif line.startswith('new file mode '):
328 elif line.startswith('new file mode '):
329 gp.op = 'ADD'
329 gp.op = 'ADD'
330 gp.setmode(int(line[-6:], 8))
330 gp.setmode(int(line[-6:], 8))
331 elif line.startswith('new mode '):
331 elif line.startswith('new mode '):
332 gp.setmode(int(line[-6:], 8))
332 gp.setmode(int(line[-6:], 8))
333 elif line.startswith('GIT binary patch'):
333 elif line.startswith('GIT binary patch'):
334 gp.binary = True
334 gp.binary = True
335 if gp:
335 if gp:
336 gitpatches.append(gp)
336 gitpatches.append(gp)
337
337
338 return gitpatches
338 return gitpatches
339
339
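An illustrative call to readgitpatch() with hand-written git extended headers; the file names are invented, and a plain list stands in for the line reader:

    sample = ['diff --git a/old.txt b/new.txt',
              'rename from old.txt',
              'rename to new.txt',
              'diff --git a/tool.sh b/tool.sh',
              'new file mode 100755']
    for gp in readgitpatch(sample):
        # expect a RENAME of old.txt to new.txt, then an ADD with the exec bit set
        print('%s %s -> %s mode=%s' % (gp.op, gp.oldpath, gp.path, gp.mode))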
340 class linereader(object):
340 class linereader(object):
341 # simple class to allow pushing lines back into the input stream
341 # simple class to allow pushing lines back into the input stream
342 def __init__(self, fp):
342 def __init__(self, fp):
343 self.fp = fp
343 self.fp = fp
344 self.buf = []
344 self.buf = []
345
345
346 def push(self, line):
346 def push(self, line):
347 if line is not None:
347 if line is not None:
348 self.buf.append(line)
348 self.buf.append(line)
349
349
350 def readline(self):
350 def readline(self):
351 if self.buf:
351 if self.buf:
352 l = self.buf[0]
352 l = self.buf[0]
353 del self.buf[0]
353 del self.buf[0]
354 return l
354 return l
355 return self.fp.readline()
355 return self.fp.readline()
356
356
357 def __iter__(self):
357 def __iter__(self):
358 while True:
358 while True:
359 l = self.readline()
359 l = self.readline()
360 if not l:
360 if not l:
361 break
361 break
362 yield l
362 yield l
363
363
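linereader.push() lets a parser un-read a line it has already looked at, which read_context_hunk() below relies on. A tiny sketch of that behaviour, using the cStringIO import at the top of this file:

    lr = linereader(cStringIO.StringIO('one\ntwo\n'))
    peeked = lr.readline()      # 'one\n'
    lr.push(peeked)             # give it back
    print([l for l in lr])      # ['one\n', 'two\n'] - nothing was lost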
364 class abstractbackend(object):
364 class abstractbackend(object):
365 def __init__(self, ui):
365 def __init__(self, ui):
366 self.ui = ui
366 self.ui = ui
367
367
368 def getfile(self, fname):
368 def getfile(self, fname):
369 """Return target file data and flags as a (data, (islink,
369 """Return target file data and flags as a (data, (islink,
370 isexec)) tuple.
370 isexec)) tuple.
371 """
371 """
372 raise NotImplementedError
372 raise NotImplementedError
373
373
374 def setfile(self, fname, data, mode, copysource):
374 def setfile(self, fname, data, mode, copysource):
375 """Write data to target file fname and set its mode. mode is a
375 """Write data to target file fname and set its mode. mode is a
376 (islink, isexec) tuple. If data is None, the file content should
376 (islink, isexec) tuple. If data is None, the file content should
377 be left unchanged. If the file is modified after being copied,
377 be left unchanged. If the file is modified after being copied,
378 copysource is set to the original file name.
378 copysource is set to the original file name.
379 """
379 """
380 raise NotImplementedError
380 raise NotImplementedError
381
381
382 def unlink(self, fname):
382 def unlink(self, fname):
383 """Unlink target file."""
383 """Unlink target file."""
384 raise NotImplementedError
384 raise NotImplementedError
385
385
386 def writerej(self, fname, failed, total, lines):
386 def writerej(self, fname, failed, total, lines):
387 """Write rejected lines for fname. total is the number of hunks
387 """Write rejected lines for fname. total is the number of hunks
388 which failed to apply and total the total number of hunks for this
388 which failed to apply and total the total number of hunks for this
389 files.
389 files.
390 """
390 """
391 pass
391 pass
392
392
393 def exists(self, fname):
393 def exists(self, fname):
394 raise NotImplementedError
394 raise NotImplementedError
395
395
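The docstrings above spell out the backend contract the patching code works against: getfile() returns (data, (islink, isexec)), setfile() writes data or only adjusts flags when data is None, unlink() removes, exists() probes. A minimal in-memory backend, hypothetical and for illustration only, that follows the same interface:

    class membackend(abstractbackend):
        """Illustrative only: keep results in a dict instead of the filesystem."""
        def __init__(self, ui, files=None):
            super(membackend, self).__init__(ui)
            self.files = dict(files or {})      # fname -> (data, (islink, isexec))

        def getfile(self, fname):
            if fname not in self.files:
                raise IOError()
            return self.files[fname]

        def setfile(self, fname, data, mode, copysource):
            if data is None:                    # mode-only change
                data = self.files[fname][0]
            self.files[fname] = (data, mode)

        def unlink(self, fname):
            self.files.pop(fname, None)

        def exists(self, fname):
            return fname in self.files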
396 class fsbackend(abstractbackend):
396 class fsbackend(abstractbackend):
397 def __init__(self, ui, basedir):
397 def __init__(self, ui, basedir):
398 super(fsbackend, self).__init__(ui)
398 super(fsbackend, self).__init__(ui)
399 self.opener = scmutil.opener(basedir)
399 self.opener = scmutil.opener(basedir)
400
400
401 def _join(self, f):
401 def _join(self, f):
402 return os.path.join(self.opener.base, f)
402 return os.path.join(self.opener.base, f)
403
403
404 def getfile(self, fname):
404 def getfile(self, fname):
405 path = self._join(fname)
405 path = self._join(fname)
406 if os.path.islink(path):
406 if os.path.islink(path):
407 return (os.readlink(path), (True, False))
407 return (os.readlink(path), (True, False))
408 isexec = False
408 isexec = False
409 try:
409 try:
410 isexec = os.lstat(path).st_mode & 0100 != 0
410 isexec = os.lstat(path).st_mode & 0100 != 0
411 except OSError, e:
411 except OSError, e:
412 if e.errno != errno.ENOENT:
412 if e.errno != errno.ENOENT:
413 raise
413 raise
414 return (self.opener.read(fname), (False, isexec))
414 return (self.opener.read(fname), (False, isexec))
415
415
416 def setfile(self, fname, data, mode, copysource):
416 def setfile(self, fname, data, mode, copysource):
417 islink, isexec = mode
417 islink, isexec = mode
418 if data is None:
418 if data is None:
419 util.setflags(self._join(fname), islink, isexec)
419 util.setflags(self._join(fname), islink, isexec)
420 return
420 return
421 if islink:
421 if islink:
422 self.opener.symlink(data, fname)
422 self.opener.symlink(data, fname)
423 else:
423 else:
424 self.opener.write(fname, data)
424 self.opener.write(fname, data)
425 if isexec:
425 if isexec:
426 util.setflags(self._join(fname), False, True)
426 util.setflags(self._join(fname), False, True)
427
427
428 def unlink(self, fname):
428 def unlink(self, fname):
429 try:
429 try:
430 util.unlinkpath(self._join(fname))
430 util.unlinkpath(self._join(fname))
431 except OSError, inst:
431 except OSError, inst:
432 if inst.errno != errno.ENOENT:
432 if inst.errno != errno.ENOENT:
433 raise
433 raise
434
434
435 def writerej(self, fname, failed, total, lines):
435 def writerej(self, fname, failed, total, lines):
436 fname = fname + ".rej"
436 fname = fname + ".rej"
437 self.ui.warn(
437 self.ui.warn(
438 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
438 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
439 (failed, total, fname))
439 (failed, total, fname))
440 fp = self.opener(fname, 'w')
440 fp = self.opener(fname, 'w')
441 fp.writelines(lines)
441 fp.writelines(lines)
442 fp.close()
442 fp.close()
443
443
444 def exists(self, fname):
444 def exists(self, fname):
445 return os.path.lexists(self._join(fname))
445 return os.path.lexists(self._join(fname))
446
446
447 class workingbackend(fsbackend):
447 class workingbackend(fsbackend):
448 def __init__(self, ui, repo, similarity):
448 def __init__(self, ui, repo, similarity):
449 super(workingbackend, self).__init__(ui, repo.root)
449 super(workingbackend, self).__init__(ui, repo.root)
450 self.repo = repo
450 self.repo = repo
451 self.similarity = similarity
451 self.similarity = similarity
452 self.removed = set()
452 self.removed = set()
453 self.changed = set()
453 self.changed = set()
454 self.copied = []
454 self.copied = []
455
455
456 def _checkknown(self, fname):
456 def _checkknown(self, fname):
457 if self.repo.dirstate[fname] == '?' and self.exists(fname):
457 if self.repo.dirstate[fname] == '?' and self.exists(fname):
458 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
458 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
459
459
460 def setfile(self, fname, data, mode, copysource):
460 def setfile(self, fname, data, mode, copysource):
461 self._checkknown(fname)
461 self._checkknown(fname)
462 super(workingbackend, self).setfile(fname, data, mode, copysource)
462 super(workingbackend, self).setfile(fname, data, mode, copysource)
463 if copysource is not None:
463 if copysource is not None:
464 self.copied.append((copysource, fname))
464 self.copied.append((copysource, fname))
465 self.changed.add(fname)
465 self.changed.add(fname)
466
466
467 def unlink(self, fname):
467 def unlink(self, fname):
468 self._checkknown(fname)
468 self._checkknown(fname)
469 super(workingbackend, self).unlink(fname)
469 super(workingbackend, self).unlink(fname)
470 self.removed.add(fname)
470 self.removed.add(fname)
471 self.changed.add(fname)
471 self.changed.add(fname)
472
472
473 def close(self):
473 def close(self):
474 wctx = self.repo[None]
474 wctx = self.repo[None]
475 addremoved = set(self.changed)
475 addremoved = set(self.changed)
476 for src, dst in self.copied:
476 for src, dst in self.copied:
477 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
477 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
478 if self.removed:
478 if self.removed:
479 wctx.forget(sorted(self.removed))
479 wctx.forget(sorted(self.removed))
480 for f in self.removed:
480 for f in self.removed:
481 if f not in self.repo.dirstate:
481 if f not in self.repo.dirstate:
482 # File was deleted and no longer belongs to the
482 # File was deleted and no longer belongs to the
483 # dirstate, it was probably marked added then
483 # dirstate, it was probably marked added then
484 # deleted, and should not be considered by
484 # deleted, and should not be considered by
485 # addremove().
485 # addremove().
486 addremoved.discard(f)
486 addremoved.discard(f)
487 if addremoved:
487 if addremoved:
488 cwd = self.repo.getcwd()
488 cwd = self.repo.getcwd()
489 if cwd:
489 if cwd:
490 addremoved = [util.pathto(self.repo.root, cwd, f)
490 addremoved = [util.pathto(self.repo.root, cwd, f)
491 for f in addremoved]
491 for f in addremoved]
492 scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
492 scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
493 return sorted(self.changed)
493 return sorted(self.changed)
494
494
495 class filestore(object):
495 class filestore(object):
496 def __init__(self, maxsize=None):
496 def __init__(self, maxsize=None):
497 self.opener = None
497 self.opener = None
498 self.files = {}
498 self.files = {}
499 self.created = 0
499 self.created = 0
500 self.maxsize = maxsize
500 self.maxsize = maxsize
501 if self.maxsize is None:
501 if self.maxsize is None:
502 self.maxsize = 4*(2**20)
502 self.maxsize = 4*(2**20)
503 self.size = 0
503 self.size = 0
504 self.data = {}
504 self.data = {}
505
505
506 def setfile(self, fname, data, mode, copied=None):
506 def setfile(self, fname, data, mode, copied=None):
507 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
507 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
508 self.data[fname] = (data, mode, copied)
508 self.data[fname] = (data, mode, copied)
509 self.size += len(data)
509 self.size += len(data)
510 else:
510 else:
511 if self.opener is None:
511 if self.opener is None:
512 root = tempfile.mkdtemp(prefix='hg-patch-')
512 root = tempfile.mkdtemp(prefix='hg-patch-')
513 self.opener = scmutil.opener(root)
513 self.opener = scmutil.opener(root)
514 # Avoid filename issues with these simple names
514 # Avoid filename issues with these simple names
515 fn = str(self.created)
515 fn = str(self.created)
516 self.opener.write(fn, data)
516 self.opener.write(fn, data)
517 self.created += 1
517 self.created += 1
518 self.files[fname] = (fn, mode, copied)
518 self.files[fname] = (fn, mode, copied)
519
519
520 def getfile(self, fname):
520 def getfile(self, fname):
521 if fname in self.data:
521 if fname in self.data:
522 return self.data[fname]
522 return self.data[fname]
523 if not self.opener or fname not in self.files:
523 if not self.opener or fname not in self.files:
524 raise IOError()
524 raise IOError()
525 fn, mode, copied = self.files[fname]
525 fn, mode, copied = self.files[fname]
526 return self.opener.read(fn), mode, copied
526 return self.opener.read(fn), mode, copied
527
527
528 def close(self):
528 def close(self):
529 if self.opener:
529 if self.opener:
530 shutil.rmtree(self.opener.base)
530 shutil.rmtree(self.opener.base)
531
531
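filestore keeps small results in memory and spills anything that would push it past maxsize (4 MiB unless overridden) into a temporary directory. A short sketch with a deliberately tiny cap, assuming this module's namespace is available:

    store = filestore(maxsize=16)                    # tiny cap just for illustration
    store.setfile('small', 'in memory', (False, False))
    store.setfile('big', 'x' * 1024, (False, False)) # over the cap, spilled to a tmpdir
    try:
        data, mode, copied = store.getfile('big')
        assert data == 'x' * 1024
    finally:
        store.close()                                # removes the temporary directory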
532 class repobackend(abstractbackend):
532 class repobackend(abstractbackend):
533 def __init__(self, ui, repo, ctx, store):
533 def __init__(self, ui, repo, ctx, store):
534 super(repobackend, self).__init__(ui)
534 super(repobackend, self).__init__(ui)
535 self.repo = repo
535 self.repo = repo
536 self.ctx = ctx
536 self.ctx = ctx
537 self.store = store
537 self.store = store
538 self.changed = set()
538 self.changed = set()
539 self.removed = set()
539 self.removed = set()
540 self.copied = {}
540 self.copied = {}
541
541
542 def _checkknown(self, fname):
542 def _checkknown(self, fname):
543 if fname not in self.ctx:
543 if fname not in self.ctx:
544 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
544 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
545
545
546 def getfile(self, fname):
546 def getfile(self, fname):
547 try:
547 try:
548 fctx = self.ctx[fname]
548 fctx = self.ctx[fname]
549 except error.LookupError:
549 except error.LookupError:
550 raise IOError()
550 raise IOError()
551 flags = fctx.flags()
551 flags = fctx.flags()
552 return fctx.data(), ('l' in flags, 'x' in flags)
552 return fctx.data(), ('l' in flags, 'x' in flags)
553
553
554 def setfile(self, fname, data, mode, copysource):
554 def setfile(self, fname, data, mode, copysource):
555 if copysource:
555 if copysource:
556 self._checkknown(copysource)
556 self._checkknown(copysource)
557 if data is None:
557 if data is None:
558 data = self.ctx[fname].data()
558 data = self.ctx[fname].data()
559 self.store.setfile(fname, data, mode, copysource)
559 self.store.setfile(fname, data, mode, copysource)
560 self.changed.add(fname)
560 self.changed.add(fname)
561 if copysource:
561 if copysource:
562 self.copied[fname] = copysource
562 self.copied[fname] = copysource
563
563
564 def unlink(self, fname):
564 def unlink(self, fname):
565 self._checkknown(fname)
565 self._checkknown(fname)
566 self.removed.add(fname)
566 self.removed.add(fname)
567
567
568 def exists(self, fname):
568 def exists(self, fname):
569 return fname in self.ctx
569 return fname in self.ctx
570
570
571 def close(self):
571 def close(self):
572 return self.changed | self.removed
572 return self.changed | self.removed
573
573
574 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
574 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
575 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
575 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
576 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
576 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
577 eolmodes = ['strict', 'crlf', 'lf', 'auto']
577 eolmodes = ['strict', 'crlf', 'lf', 'auto']
578
578
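unidesc and contextdesc pull the start/length pairs out of unified and context hunk headers; the length group is absent when it is 1. A standalone check using the same patterns:

    import re
    unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
    contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')

    print(unidesc.match('@@ -12,5 +12,6 @@').groups())  # ('12', '5', '12', '6')
    print(unidesc.match('@@ -3 +3 @@').groups())         # ('3', None, '3', None): lengths default to 1
    print(contextdesc.match('*** 7,11 ****').groups())   # ('7', '11')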
579 class patchfile(object):
579 class patchfile(object):
580 def __init__(self, ui, gp, backend, store, eolmode='strict'):
580 def __init__(self, ui, gp, backend, store, eolmode='strict'):
581 self.fname = gp.path
581 self.fname = gp.path
582 self.eolmode = eolmode
582 self.eolmode = eolmode
583 self.eol = None
583 self.eol = None
584 self.backend = backend
584 self.backend = backend
585 self.ui = ui
585 self.ui = ui
586 self.lines = []
586 self.lines = []
587 self.exists = False
587 self.exists = False
588 self.missing = True
588 self.missing = True
589 self.mode = gp.mode
589 self.mode = gp.mode
590 self.copysource = gp.oldpath
590 self.copysource = gp.oldpath
591 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
591 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
592 self.remove = gp.op == 'DELETE'
592 self.remove = gp.op == 'DELETE'
593 try:
593 try:
594 if self.copysource is None:
594 if self.copysource is None:
595 data, mode = backend.getfile(self.fname)
595 data, mode = backend.getfile(self.fname)
596 self.exists = True
596 self.exists = True
597 else:
597 else:
598 data, mode = store.getfile(self.copysource)[:2]
598 data, mode = store.getfile(self.copysource)[:2]
599 self.exists = backend.exists(self.fname)
599 self.exists = backend.exists(self.fname)
600 self.missing = False
600 self.missing = False
601 if data:
601 if data:
602 self.lines = mdiff.splitnewlines(data)
602 self.lines = mdiff.splitnewlines(data)
603 if self.mode is None:
603 if self.mode is None:
604 self.mode = mode
604 self.mode = mode
605 if self.lines:
605 if self.lines:
606 # Normalize line endings
606 # Normalize line endings
607 if self.lines[0].endswith('\r\n'):
607 if self.lines[0].endswith('\r\n'):
608 self.eol = '\r\n'
608 self.eol = '\r\n'
609 elif self.lines[0].endswith('\n'):
609 elif self.lines[0].endswith('\n'):
610 self.eol = '\n'
610 self.eol = '\n'
611 if eolmode != 'strict':
611 if eolmode != 'strict':
612 nlines = []
612 nlines = []
613 for l in self.lines:
613 for l in self.lines:
614 if l.endswith('\r\n'):
614 if l.endswith('\r\n'):
615 l = l[:-2] + '\n'
615 l = l[:-2] + '\n'
616 nlines.append(l)
616 nlines.append(l)
617 self.lines = nlines
617 self.lines = nlines
618 except IOError:
618 except IOError:
619 if self.create:
619 if self.create:
620 self.missing = False
620 self.missing = False
621 if self.mode is None:
621 if self.mode is None:
622 self.mode = (False, False)
622 self.mode = (False, False)
623 if self.missing:
623 if self.missing:
624 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
624 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
625
625
626 self.hash = {}
626 self.hash = {}
627 self.dirty = 0
627 self.dirty = 0
628 self.offset = 0
628 self.offset = 0
629 self.skew = 0
629 self.skew = 0
630 self.rej = []
630 self.rej = []
631 self.fileprinted = False
631 self.fileprinted = False
632 self.printfile(False)
632 self.printfile(False)
633 self.hunks = 0
633 self.hunks = 0
634
634
635 def writelines(self, fname, lines, mode):
635 def writelines(self, fname, lines, mode):
636 if self.eolmode == 'auto':
636 if self.eolmode == 'auto':
637 eol = self.eol
637 eol = self.eol
638 elif self.eolmode == 'crlf':
638 elif self.eolmode == 'crlf':
639 eol = '\r\n'
639 eol = '\r\n'
640 else:
640 else:
641 eol = '\n'
641 eol = '\n'
642
642
643 if self.eolmode != 'strict' and eol and eol != '\n':
643 if self.eolmode != 'strict' and eol and eol != '\n':
644 rawlines = []
644 rawlines = []
645 for l in lines:
645 for l in lines:
646 if l and l[-1] == '\n':
646 if l and l[-1] == '\n':
647 l = l[:-1] + eol
647 l = l[:-1] + eol
648 rawlines.append(l)
648 rawlines.append(l)
649 lines = rawlines
649 lines = rawlines
650
650
651 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
651 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
652
652
653 def printfile(self, warn):
653 def printfile(self, warn):
654 if self.fileprinted:
654 if self.fileprinted:
655 return
655 return
656 if warn or self.ui.verbose:
656 if warn or self.ui.verbose:
657 self.fileprinted = True
657 self.fileprinted = True
658 s = _("patching file %s\n") % self.fname
658 s = _("patching file %s\n") % self.fname
659 if warn:
659 if warn:
660 self.ui.warn(s)
660 self.ui.warn(s)
661 else:
661 else:
662 self.ui.note(s)
662 self.ui.note(s)
663
663
664
664
665 def findlines(self, l, linenum):
665 def findlines(self, l, linenum):
666 # looks through the hash and finds candidate lines. The
666 # looks through the hash and finds candidate lines. The
667 # result is a list of line numbers sorted based on distance
667 # result is a list of line numbers sorted based on distance
668 # from linenum
668 # from linenum
669
669
670 cand = self.hash.get(l, [])
670 cand = self.hash.get(l, [])
671 if len(cand) > 1:
671 if len(cand) > 1:
672 # resort our list of potentials forward then back.
672 # resort our list of potentials forward then back.
673 cand.sort(key=lambda x: abs(x - linenum))
673 cand.sort(key=lambda x: abs(x - linenum))
674 return cand
674 return cand
675
675
676 def write_rej(self):
676 def write_rej(self):
677 # our rejects are a little different from patch(1). This always
677 # our rejects are a little different from patch(1). This always
678 # creates rejects in the same form as the original patch. A file
678 # creates rejects in the same form as the original patch. A file
679 # header is inserted so that you can run the reject through patch again
679 # header is inserted so that you can run the reject through patch again
680 # without having to type the filename.
680 # without having to type the filename.
681 if not self.rej:
681 if not self.rej:
682 return
682 return
683 base = os.path.basename(self.fname)
683 base = os.path.basename(self.fname)
684 lines = ["--- %s\n+++ %s\n" % (base, base)]
684 lines = ["--- %s\n+++ %s\n" % (base, base)]
685 for x in self.rej:
685 for x in self.rej:
686 for l in x.hunk:
686 for l in x.hunk:
687 lines.append(l)
687 lines.append(l)
688 if l[-1] != '\n':
688 if l[-1] != '\n':
689 lines.append("\n\ No newline at end of file\n")
689 lines.append("\n\ No newline at end of file\n")
690 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
690 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
691
691
692 def apply(self, h):
692 def apply(self, h):
693 if not h.complete():
693 if not h.complete():
694 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
694 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
695 (h.number, h.desc, len(h.a), h.lena, len(h.b),
695 (h.number, h.desc, len(h.a), h.lena, len(h.b),
696 h.lenb))
696 h.lenb))
697
697
698 self.hunks += 1
698 self.hunks += 1
699
699
700 if self.missing:
700 if self.missing:
701 self.rej.append(h)
701 self.rej.append(h)
702 return -1
702 return -1
703
703
704 if self.exists and self.create:
704 if self.exists and self.create:
705 if self.copysource:
705 if self.copysource:
706 self.ui.warn(_("cannot create %s: destination already "
706 self.ui.warn(_("cannot create %s: destination already "
707 "exists\n" % self.fname))
707 "exists\n" % self.fname))
708 else:
708 else:
709 self.ui.warn(_("file %s already exists\n") % self.fname)
709 self.ui.warn(_("file %s already exists\n") % self.fname)
710 self.rej.append(h)
710 self.rej.append(h)
711 return -1
711 return -1
712
712
713 if isinstance(h, binhunk):
713 if isinstance(h, binhunk):
714 if self.remove:
714 if self.remove:
715 self.backend.unlink(self.fname)
715 self.backend.unlink(self.fname)
716 else:
716 else:
717 self.lines[:] = h.new()
717 self.lines[:] = h.new()
718 self.offset += len(h.new())
718 self.offset += len(h.new())
719 self.dirty = True
719 self.dirty = True
720 return 0
720 return 0
721
721
722 horig = h
722 horig = h
723 if (self.eolmode in ('crlf', 'lf')
723 if (self.eolmode in ('crlf', 'lf')
724 or self.eolmode == 'auto' and self.eol):
724 or self.eolmode == 'auto' and self.eol):
725 # If new eols are going to be normalized, then normalize
725 # If new eols are going to be normalized, then normalize
726 # hunk data before patching. Otherwise, preserve input
726 # hunk data before patching. Otherwise, preserve input
727 # line-endings.
727 # line-endings.
728 h = h.getnormalized()
728 h = h.getnormalized()
729
729
730 # fast case first, no offsets, no fuzz
730 # fast case first, no offsets, no fuzz
731 old, new = h.fuzzit(0, False)
731 old, oldstart, new, newstart = h.fuzzit(0, False)
732 start = h.starta + self.offset
732 oldstart += self.offset
733 # zero length hunk ranges already have their start decremented
733 orig_start = oldstart
734 if h.lena:
735 start -= 1
736 orig_start = start
737 # if there's skew we want to emit the "(offset %d lines)" even
734 # if there's skew we want to emit the "(offset %d lines)" even
738 # when the hunk cleanly applies at start + skew, so skip the
735 # when the hunk cleanly applies at start + skew, so skip the
739 # fast case code
736 # fast case code
740 if self.skew == 0 and diffhelpers.testhunk(old, self.lines, start) == 0:
737 if (self.skew == 0 and
738 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
741 if self.remove:
739 if self.remove:
742 self.backend.unlink(self.fname)
740 self.backend.unlink(self.fname)
743 else:
741 else:
744 self.lines[start : start + h.lena] = new
742 self.lines[oldstart:oldstart + len(old)] = new
745 self.offset += h.lenb - h.lena
743 self.offset += len(new) - len(old)
746 self.dirty = True
744 self.dirty = True
747 return 0
745 return 0
748
746
749 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
747 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
750 self.hash = {}
748 self.hash = {}
751 for x, s in enumerate(self.lines):
749 for x, s in enumerate(self.lines):
752 self.hash.setdefault(s, []).append(x)
750 self.hash.setdefault(s, []).append(x)
753 if h.hunk[-1][0] != ' ':
751 if h.hunk[-1][0] != ' ':
754 # if the hunk tried to put something at the bottom of the file
752 # if the hunk tried to put something at the bottom of the file
755 # override the start line and use eof here
753 # override the start line and use eof here
756 search_start = len(self.lines)
754 search_start = len(self.lines)
757 else:
755 else:
758 search_start = orig_start + self.skew
756 search_start = orig_start + self.skew
759
757
760 for fuzzlen in xrange(3):
758 for fuzzlen in xrange(3):
761 for toponly in [True, False]:
759 for toponly in [True, False]:
762 old, new = h.fuzzit(fuzzlen, toponly)
760 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
763
761
764 cand = self.findlines(old[0][1:], search_start)
762 cand = self.findlines(old[0][1:], search_start)
765 for l in cand:
763 for l in cand:
766 if diffhelpers.testhunk(old, self.lines, l) == 0:
764 if diffhelpers.testhunk(old, self.lines, l) == 0:
767 self.lines[l : l + len(old)] = new
765 self.lines[l : l + len(old)] = new
768 self.offset += len(new) - len(old)
766 self.offset += len(new) - len(old)
769 self.skew = l - orig_start
767 self.skew = l - orig_start
770 self.dirty = True
768 self.dirty = True
771 offset = l - orig_start - fuzzlen
769 offset = l - orig_start - fuzzlen
772 if fuzzlen:
770 if fuzzlen:
773 msg = _("Hunk #%d succeeded at %d "
771 msg = _("Hunk #%d succeeded at %d "
774 "with fuzz %d "
772 "with fuzz %d "
775 "(offset %d lines).\n")
773 "(offset %d lines).\n")
776 self.printfile(True)
774 self.printfile(True)
777 self.ui.warn(msg %
775 self.ui.warn(msg %
778 (h.number, l + 1, fuzzlen, offset))
776 (h.number, l + 1, fuzzlen, offset))
779 else:
777 else:
780 msg = _("Hunk #%d succeeded at %d "
778 msg = _("Hunk #%d succeeded at %d "
781 "(offset %d lines).\n")
779 "(offset %d lines).\n")
782 self.ui.note(msg % (h.number, l + 1, offset))
780 self.ui.note(msg % (h.number, l + 1, offset))
783 return fuzzlen
781 return fuzzlen
784 self.printfile(True)
782 self.printfile(True)
785 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
783 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
786 self.rej.append(horig)
784 self.rej.append(horig)
787 return -1
785 return -1
788
786
789 def close(self):
787 def close(self):
790 if self.dirty:
788 if self.dirty:
791 self.writelines(self.fname, self.lines, self.mode)
789 self.writelines(self.fname, self.lines, self.mode)
792 self.write_rej()
790 self.write_rej()
793 return len(self.rej)
791 return len(self.rej)
794
792
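The change to apply() above takes the fuzzed oldstart straight from hunk.fuzzit() and only adds self.offset on top, instead of recomputing the start from h.starta locally. A toy walk-through of that bookkeeping with made-up numbers, not a real run:

    # hunk header "@@ -7,3 +7,4 @@" -> starta=7, lena=3; earlier hunks in this
    # file shifted lines by offset=2 and the last match was skew=5 lines away
    starta, lena = 7, 3
    offset, skew, fuzzlen = 2, 5, 1

    oldstart = starta - (1 if lena else 0)   # what fuzzit(0, False) reports: 6
    oldstart += offset                       # fast-path probe position: 8
    orig_start = oldstart

    search_start = orig_start + skew         # findlines() starts looking here: 13
    l = 14                                   # say testhunk() finally matches at 14
    new_skew = l - orig_start                # 6, remembered for the next hunk
    reported = l - orig_start - fuzzlen      # the "(offset %d lines)" figure: 5
    print(oldstart, search_start, new_skew, reported)   # 8 13 6 5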
795 class hunk(object):
793 class hunk(object):
796 def __init__(self, desc, num, lr, context):
794 def __init__(self, desc, num, lr, context):
797 self.number = num
795 self.number = num
798 self.desc = desc
796 self.desc = desc
799 self.hunk = [desc]
797 self.hunk = [desc]
800 self.a = []
798 self.a = []
801 self.b = []
799 self.b = []
802 self.starta = self.lena = None
800 self.starta = self.lena = None
803 self.startb = self.lenb = None
801 self.startb = self.lenb = None
804 if lr is not None:
802 if lr is not None:
805 if context:
803 if context:
806 self.read_context_hunk(lr)
804 self.read_context_hunk(lr)
807 else:
805 else:
808 self.read_unified_hunk(lr)
806 self.read_unified_hunk(lr)
809
807
810 def getnormalized(self):
808 def getnormalized(self):
811 """Return a copy with line endings normalized to LF."""
809 """Return a copy with line endings normalized to LF."""
812
810
813 def normalize(lines):
811 def normalize(lines):
814 nlines = []
812 nlines = []
815 for line in lines:
813 for line in lines:
816 if line.endswith('\r\n'):
814 if line.endswith('\r\n'):
817 line = line[:-2] + '\n'
815 line = line[:-2] + '\n'
818 nlines.append(line)
816 nlines.append(line)
819 return nlines
817 return nlines
820
818
821 # Dummy object, it is rebuilt manually
819 # Dummy object, it is rebuilt manually
822 nh = hunk(self.desc, self.number, None, None)
820 nh = hunk(self.desc, self.number, None, None)
823 nh.number = self.number
821 nh.number = self.number
824 nh.desc = self.desc
822 nh.desc = self.desc
825 nh.hunk = self.hunk
823 nh.hunk = self.hunk
826 nh.a = normalize(self.a)
824 nh.a = normalize(self.a)
827 nh.b = normalize(self.b)
825 nh.b = normalize(self.b)
828 nh.starta = self.starta
826 nh.starta = self.starta
829 nh.startb = self.startb
827 nh.startb = self.startb
830 nh.lena = self.lena
828 nh.lena = self.lena
831 nh.lenb = self.lenb
829 nh.lenb = self.lenb
832 return nh
830 return nh
833
831
834 def read_unified_hunk(self, lr):
832 def read_unified_hunk(self, lr):
835 m = unidesc.match(self.desc)
833 m = unidesc.match(self.desc)
836 if not m:
834 if not m:
837 raise PatchError(_("bad hunk #%d") % self.number)
835 raise PatchError(_("bad hunk #%d") % self.number)
838 self.starta, self.lena, self.startb, self.lenb = m.groups()
836 self.starta, self.lena, self.startb, self.lenb = m.groups()
839 if self.lena is None:
837 if self.lena is None:
840 self.lena = 1
838 self.lena = 1
841 else:
839 else:
842 self.lena = int(self.lena)
840 self.lena = int(self.lena)
843 if self.lenb is None:
841 if self.lenb is None:
844 self.lenb = 1
842 self.lenb = 1
845 else:
843 else:
846 self.lenb = int(self.lenb)
844 self.lenb = int(self.lenb)
847 self.starta = int(self.starta)
845 self.starta = int(self.starta)
848 self.startb = int(self.startb)
846 self.startb = int(self.startb)
849 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
847 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
850 # if we hit eof before finishing out the hunk, the last line will
848 # if we hit eof before finishing out the hunk, the last line will
851 # be zero length. Lets try to fix it up.
849 # be zero length. Lets try to fix it up.
852 while len(self.hunk[-1]) == 0:
850 while len(self.hunk[-1]) == 0:
853 del self.hunk[-1]
851 del self.hunk[-1]
854 del self.a[-1]
852 del self.a[-1]
855 del self.b[-1]
853 del self.b[-1]
856 self.lena -= 1
854 self.lena -= 1
857 self.lenb -= 1
855 self.lenb -= 1
858 self._fixnewline(lr)
856 self._fixnewline(lr)
859
857
860 def read_context_hunk(self, lr):
858 def read_context_hunk(self, lr):
861 self.desc = lr.readline()
859 self.desc = lr.readline()
862 m = contextdesc.match(self.desc)
860 m = contextdesc.match(self.desc)
863 if not m:
861 if not m:
864 raise PatchError(_("bad hunk #%d") % self.number)
862 raise PatchError(_("bad hunk #%d") % self.number)
865 self.starta, aend = m.groups()
863 self.starta, aend = m.groups()
866 self.starta = int(self.starta)
864 self.starta = int(self.starta)
867 if aend is None:
865 if aend is None:
868 aend = self.starta
866 aend = self.starta
869 self.lena = int(aend) - self.starta
867 self.lena = int(aend) - self.starta
870 if self.starta:
868 if self.starta:
871 self.lena += 1
869 self.lena += 1
872 for x in xrange(self.lena):
870 for x in xrange(self.lena):
873 l = lr.readline()
871 l = lr.readline()
874 if l.startswith('---'):
872 if l.startswith('---'):
875 # lines addition, old block is empty
873 # lines addition, old block is empty
876 lr.push(l)
874 lr.push(l)
877 break
875 break
878 s = l[2:]
876 s = l[2:]
879 if l.startswith('- ') or l.startswith('! '):
877 if l.startswith('- ') or l.startswith('! '):
880 u = '-' + s
878 u = '-' + s
881 elif l.startswith(' '):
879 elif l.startswith(' '):
882 u = ' ' + s
880 u = ' ' + s
883 else:
881 else:
884 raise PatchError(_("bad hunk #%d old text line %d") %
882 raise PatchError(_("bad hunk #%d old text line %d") %
885 (self.number, x))
883 (self.number, x))
886 self.a.append(u)
884 self.a.append(u)
887 self.hunk.append(u)
885 self.hunk.append(u)
888
886
889 l = lr.readline()
887 l = lr.readline()
890 if l.startswith('\ '):
888 if l.startswith('\ '):
891 s = self.a[-1][:-1]
889 s = self.a[-1][:-1]
892 self.a[-1] = s
890 self.a[-1] = s
893 self.hunk[-1] = s
891 self.hunk[-1] = s
894 l = lr.readline()
892 l = lr.readline()
895 m = contextdesc.match(l)
893 m = contextdesc.match(l)
896 if not m:
894 if not m:
897 raise PatchError(_("bad hunk #%d") % self.number)
895 raise PatchError(_("bad hunk #%d") % self.number)
898 self.startb, bend = m.groups()
896 self.startb, bend = m.groups()
899 self.startb = int(self.startb)
897 self.startb = int(self.startb)
900 if bend is None:
898 if bend is None:
901 bend = self.startb
899 bend = self.startb
902 self.lenb = int(bend) - self.startb
900 self.lenb = int(bend) - self.startb
903 if self.startb:
901 if self.startb:
904 self.lenb += 1
902 self.lenb += 1
905 hunki = 1
903 hunki = 1
906 for x in xrange(self.lenb):
904 for x in xrange(self.lenb):
907 l = lr.readline()
905 l = lr.readline()
908 if l.startswith('\ '):
906 if l.startswith('\ '):
909 # XXX: the only way to hit this is with an invalid line range.
907 # XXX: the only way to hit this is with an invalid line range.
910 # The no-eol marker is not counted in the line range, but I
908 # The no-eol marker is not counted in the line range, but I
911 # guess there are diff(1) out there which behave differently.
909 # guess there are diff(1) out there which behave differently.
912 s = self.b[-1][:-1]
910 s = self.b[-1][:-1]
913 self.b[-1] = s
911 self.b[-1] = s
914 self.hunk[hunki - 1] = s
912 self.hunk[hunki - 1] = s
915 continue
913 continue
916 if not l:
914 if not l:
917 # line deletions, new block is empty and we hit EOF
915 # line deletions, new block is empty and we hit EOF
918 lr.push(l)
916 lr.push(l)
919 break
917 break
920 s = l[2:]
918 s = l[2:]
921 if l.startswith('+ ') or l.startswith('! '):
919 if l.startswith('+ ') or l.startswith('! '):
922 u = '+' + s
920 u = '+' + s
923 elif l.startswith(' '):
921 elif l.startswith(' '):
924 u = ' ' + s
922 u = ' ' + s
925 elif len(self.b) == 0:
923 elif len(self.b) == 0:
926 # line deletions, new block is empty
924 # line deletions, new block is empty
927 lr.push(l)
925 lr.push(l)
928 break
926 break
929 else:
927 else:
930 raise PatchError(_("bad hunk #%d old text line %d") %
928 raise PatchError(_("bad hunk #%d old text line %d") %
931 (self.number, x))
929 (self.number, x))
932 self.b.append(s)
930 self.b.append(s)
933 while True:
931 while True:
934 if hunki >= len(self.hunk):
932 if hunki >= len(self.hunk):
935 h = ""
933 h = ""
936 else:
934 else:
937 h = self.hunk[hunki]
935 h = self.hunk[hunki]
938 hunki += 1
936 hunki += 1
939 if h == u:
937 if h == u:
940 break
938 break
941 elif h.startswith('-'):
939 elif h.startswith('-'):
942 continue
940 continue
943 else:
941 else:
944 self.hunk.insert(hunki - 1, u)
942 self.hunk.insert(hunki - 1, u)
945 break
943 break
946
944
947 if not self.a:
945 if not self.a:
948 # this happens when lines were only added to the hunk
946 # this happens when lines were only added to the hunk
949 for x in self.hunk:
947 for x in self.hunk:
950 if x.startswith('-') or x.startswith(' '):
948 if x.startswith('-') or x.startswith(' '):
951 self.a.append(x)
949 self.a.append(x)
952 if not self.b:
950 if not self.b:
953 # this happens when lines were only deleted from the hunk
951 # this happens when lines were only deleted from the hunk
954 for x in self.hunk:
952 for x in self.hunk:
955 if x.startswith('+') or x.startswith(' '):
953 if x.startswith('+') or x.startswith(' '):
956 self.b.append(x[1:])
954 self.b.append(x[1:])
957 # @@ -start,len +start,len @@
955 # @@ -start,len +start,len @@
958 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
956 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
959 self.startb, self.lenb)
957 self.startb, self.lenb)
960 self.hunk[0] = self.desc
958 self.hunk[0] = self.desc
961 self._fixnewline(lr)
959 self._fixnewline(lr)
962
960
963 def _fixnewline(self, lr):
961 def _fixnewline(self, lr):
964 l = lr.readline()
962 l = lr.readline()
965 if l.startswith('\ '):
963 if l.startswith('\ '):
966 diffhelpers.fix_newline(self.hunk, self.a, self.b)
964 diffhelpers.fix_newline(self.hunk, self.a, self.b)
967 else:
965 else:
968 lr.push(l)
966 lr.push(l)
969
967
970 def complete(self):
968 def complete(self):
971 return len(self.a) == self.lena and len(self.b) == self.lenb
969 return len(self.a) == self.lena and len(self.b) == self.lenb
972
970
973 def _fuzzit(self, old, new, fuzz, toponly):
971 def _fuzzit(self, old, new, fuzz, toponly):
974 # this removes context lines from the top and bottom of list 'l'. It
972 # this removes context lines from the top and bottom of list 'l'. It
975 # checks the hunk to make sure only context lines are removed, and then
973 # checks the hunk to make sure only context lines are removed, and then
976 # returns a new shortened list of lines.
974 # returns a new shortened list of lines.
977 fuzz = min(fuzz, len(old)-1)
975 fuzz = min(fuzz, len(old)-1)
978 if fuzz:
976 if fuzz:
979 top = 0
977 top = 0
980 bot = 0
978 bot = 0
981 hlen = len(self.hunk)
979 hlen = len(self.hunk)
982 for x in xrange(hlen - 1):
980 for x in xrange(hlen - 1):
983 # the hunk starts with the @@ line, so use x+1
981 # the hunk starts with the @@ line, so use x+1
984 if self.hunk[x + 1][0] == ' ':
982 if self.hunk[x + 1][0] == ' ':
985 top += 1
983 top += 1
986 else:
984 else:
987 break
985 break
988 if not toponly:
986 if not toponly:
989 for x in xrange(hlen - 1):
987 for x in xrange(hlen - 1):
990 if self.hunk[hlen - bot - 1][0] == ' ':
988 if self.hunk[hlen - bot - 1][0] == ' ':
991 bot += 1
989 bot += 1
992 else:
990 else:
993 break
991 break
994
992
995 # top and bot now count context in the hunk
993 # top and bot now count context in the hunk
996 # adjust them if either one is short
994 # adjust them if either one is short
997 context = max(top, bot, 3)
995 context = max(top, bot, 3)
998 if bot < context:
996 if bot < context:
999 bot = max(0, fuzz - (context - bot))
997 bot = max(0, fuzz - (context - bot))
1000 else:
998 else:
1001 bot = min(fuzz, bot)
999 bot = min(fuzz, bot)
1002 if top < context:
1000 if top < context:
1003 top = max(0, fuzz - (context - top))
1001 top = max(0, fuzz - (context - top))
1004 else:
1002 else:
1005 top = min(fuzz, top)
1003 top = min(fuzz, top)
1006
1004
1007 return old[top:len(old)-bot], new[top:len(new)-bot]
1005 return old[top:len(old)-bot], new[top:len(new)-bot], top
1008 return old, new
1006 return old, new, 0
1009
1007
1010 def fuzzit(self, fuzz, toponly):
1008 def fuzzit(self, fuzz, toponly):
1011 return self._fuzzit(self.a, self.b, fuzz, toponly)
1009 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1010 oldstart = self.starta + top
1011 newstart = self.startb + top
1012 # zero length hunk ranges already have their start decremented
1013 if self.lena:
1014 oldstart -= 1
1015 if self.lenb:
1016 newstart -= 1
1017 return old, oldstart, new, newstart
1012
1018
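The new return values are easiest to see with a small worked example. The sketch below is illustration only (not part of patch.py); it simply mirrors the arithmetic of fuzzit() above: trimming 'top' context lines from the head of a hunk means the fuzzed hunk now matches that many lines further into the file, which is exactly what the extra oldstart/newstart values report. The numbers are made up.

# Illustration only: a hunk declared as "@@ -10,5 +10,5 @@", fuzzed so that
# two context lines were trimmed from its top by _fuzzit().
starta, lena = 10, 5
startb, lenb = 10, 5
top = 2

oldstart = starta + top
newstart = startb + top
if lena:                  # zero-length ranges already have their start
    oldstart -= 1         # decremented, so only non-empty ones are adjusted
if lenb:
    newstart -= 1

assert (oldstart, newstart) == (11, 11)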
1013 class binhunk(object):
1019 class binhunk(object):
1014 'A binary patch file. Only understands literals so far.'
1020 'A binary patch file. Only understands literals so far.'
1015 def __init__(self, lr):
1021 def __init__(self, lr):
1016 self.text = None
1022 self.text = None
1017 self.hunk = ['GIT binary patch\n']
1023 self.hunk = ['GIT binary patch\n']
1018 self._read(lr)
1024 self._read(lr)
1019
1025
1020 def complete(self):
1026 def complete(self):
1021 return self.text is not None
1027 return self.text is not None
1022
1028
1023 def new(self):
1029 def new(self):
1024 return [self.text]
1030 return [self.text]
1025
1031
1026 def _read(self, lr):
1032 def _read(self, lr):
1027 line = lr.readline()
1033 line = lr.readline()
1028 self.hunk.append(line)
1034 self.hunk.append(line)
1029 while line and not line.startswith('literal '):
1035 while line and not line.startswith('literal '):
1030 line = lr.readline()
1036 line = lr.readline()
1031 self.hunk.append(line)
1037 self.hunk.append(line)
1032 if not line:
1038 if not line:
1033 raise PatchError(_('could not extract binary patch'))
1039 raise PatchError(_('could not extract binary patch'))
1034 size = int(line[8:].rstrip())
1040 size = int(line[8:].rstrip())
1035 dec = []
1041 dec = []
1036 line = lr.readline()
1042 line = lr.readline()
1037 self.hunk.append(line)
1043 self.hunk.append(line)
1038 while len(line) > 1:
1044 while len(line) > 1:
1039 l = line[0]
1045 l = line[0]
1040 if l <= 'Z' and l >= 'A':
1046 if l <= 'Z' and l >= 'A':
1041 l = ord(l) - ord('A') + 1
1047 l = ord(l) - ord('A') + 1
1042 else:
1048 else:
1043 l = ord(l) - ord('a') + 27
1049 l = ord(l) - ord('a') + 27
1044 dec.append(base85.b85decode(line[1:-1])[:l])
1050 dec.append(base85.b85decode(line[1:-1])[:l])
1045 line = lr.readline()
1051 line = lr.readline()
1046 self.hunk.append(line)
1052 self.hunk.append(line)
1047 text = zlib.decompress(''.join(dec))
1053 text = zlib.decompress(''.join(dec))
1048 if len(text) != size:
1054 if len(text) != size:
1049 raise PatchError(_('binary patch is %d bytes, not %d') %
1055 raise PatchError(_('binary patch is %d bytes, not %d') %
1050 (len(text), size))
1056 (len(text), size))
1051 self.text = text
1057 self.text = text
1052
1058
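The one-character length prefix that _read() decodes above (and that fmtline() inside b85diff() further down encodes) maps 'A'..'Z' to lengths 1..26 and 'a'..'z' to 27..52. A standalone round-trip check, written only as an illustration:

# Illustration only: round-trip the git binary patch per-line length byte.
def encodelen(n):
    if n <= 26:
        return chr(ord('A') + n - 1)      # 1..26  -> 'A'..'Z'
    return chr(n - 26 + ord('a') - 1)     # 27..52 -> 'a'..'z'

def decodelen(c):
    if 'A' <= c <= 'Z':
        return ord(c) - ord('A') + 1      # mirrors binhunk._read() above
    return ord(c) - ord('a') + 27

assert all(decodelen(encodelen(n)) == n for n in range(1, 53))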
1053 def parsefilename(str):
1059 def parsefilename(str):
1054 # --- filename \t|space stuff
1060 # --- filename \t|space stuff
1055 s = str[4:].rstrip('\r\n')
1061 s = str[4:].rstrip('\r\n')
1056 i = s.find('\t')
1062 i = s.find('\t')
1057 if i < 0:
1063 if i < 0:
1058 i = s.find(' ')
1064 i = s.find(' ')
1059 if i < 0:
1065 if i < 0:
1060 return s
1066 return s
1061 return s[:i]
1067 return s[:i]
1062
1068
1063 def pathstrip(path, strip):
1069 def pathstrip(path, strip):
1064 pathlen = len(path)
1070 pathlen = len(path)
1065 i = 0
1071 i = 0
1066 if strip == 0:
1072 if strip == 0:
1067 return '', path.rstrip()
1073 return '', path.rstrip()
1068 count = strip
1074 count = strip
1069 while count > 0:
1075 while count > 0:
1070 i = path.find('/', i)
1076 i = path.find('/', i)
1071 if i == -1:
1077 if i == -1:
1072 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1078 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1073 (count, strip, path))
1079 (count, strip, path))
1074 i += 1
1080 i += 1
1075 # consume '//' in the path
1081 # consume '//' in the path
1076 while i < pathlen - 1 and path[i] == '/':
1082 while i < pathlen - 1 and path[i] == '/':
1077 i += 1
1083 i += 1
1078 count -= 1
1084 count -= 1
1079 return path[:i].lstrip(), path[i:].rstrip()
1085 return path[:i].lstrip(), path[i:].rstrip()
1080
1086
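pathstrip() implements the familiar -p behaviour of patch(1): it drops 'strip' leading path components and returns both the removed prefix and the remainder. A few illustration-only calls, with the expected tuples worked out from the code above (assuming the module is importable as mercurial.patch):

# Illustration only.
from mercurial.patch import PatchError, pathstrip

assert pathstrip('a/b/c.txt', 1) == ('a/', 'b/c.txt')
assert pathstrip('a/b/c.txt', 0) == ('', 'a/b/c.txt')   # strip 0 keeps all
try:
    pathstrip('c.txt', 1)          # more components requested than exist
except PatchError:
    pass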
1081 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
1087 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
1082 nulla = afile_orig == "/dev/null"
1088 nulla = afile_orig == "/dev/null"
1083 nullb = bfile_orig == "/dev/null"
1089 nullb = bfile_orig == "/dev/null"
1084 create = nulla and hunk.starta == 0 and hunk.lena == 0
1090 create = nulla and hunk.starta == 0 and hunk.lena == 0
1085 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1091 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1086 abase, afile = pathstrip(afile_orig, strip)
1092 abase, afile = pathstrip(afile_orig, strip)
1087 gooda = not nulla and backend.exists(afile)
1093 gooda = not nulla and backend.exists(afile)
1088 bbase, bfile = pathstrip(bfile_orig, strip)
1094 bbase, bfile = pathstrip(bfile_orig, strip)
1089 if afile == bfile:
1095 if afile == bfile:
1090 goodb = gooda
1096 goodb = gooda
1091 else:
1097 else:
1092 goodb = not nullb and backend.exists(bfile)
1098 goodb = not nullb and backend.exists(bfile)
1093 missing = not goodb and not gooda and not create
1099 missing = not goodb and not gooda and not create
1094
1100
1095 # some diff programs apparently produce patches where the afile is
1101 # some diff programs apparently produce patches where the afile is
1096 # not /dev/null, but afile starts with bfile
1102 # not /dev/null, but afile starts with bfile
1097 abasedir = afile[:afile.rfind('/') + 1]
1103 abasedir = afile[:afile.rfind('/') + 1]
1098 bbasedir = bfile[:bfile.rfind('/') + 1]
1104 bbasedir = bfile[:bfile.rfind('/') + 1]
1099 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1105 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1100 and hunk.starta == 0 and hunk.lena == 0):
1106 and hunk.starta == 0 and hunk.lena == 0):
1101 create = True
1107 create = True
1102 missing = False
1108 missing = False
1103
1109
1104 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1110 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1105 # diff is between a file and its backup. In this case, the original
1111 # diff is between a file and its backup. In this case, the original
1106 # file should be patched (see original mpatch code).
1112 # file should be patched (see original mpatch code).
1107 isbackup = (abase == bbase and bfile.startswith(afile))
1113 isbackup = (abase == bbase and bfile.startswith(afile))
1108 fname = None
1114 fname = None
1109 if not missing:
1115 if not missing:
1110 if gooda and goodb:
1116 if gooda and goodb:
1111 fname = isbackup and afile or bfile
1117 fname = isbackup and afile or bfile
1112 elif gooda:
1118 elif gooda:
1113 fname = afile
1119 fname = afile
1114
1120
1115 if not fname:
1121 if not fname:
1116 if not nullb:
1122 if not nullb:
1117 fname = isbackup and afile or bfile
1123 fname = isbackup and afile or bfile
1118 elif not nulla:
1124 elif not nulla:
1119 fname = afile
1125 fname = afile
1120 else:
1126 else:
1121 raise PatchError(_("undefined source and destination files"))
1127 raise PatchError(_("undefined source and destination files"))
1122
1128
1123 gp = patchmeta(fname)
1129 gp = patchmeta(fname)
1124 if create:
1130 if create:
1125 gp.op = 'ADD'
1131 gp.op = 'ADD'
1126 elif remove:
1132 elif remove:
1127 gp.op = 'DELETE'
1133 gp.op = 'DELETE'
1128 return gp
1134 return gp
1129
1135
1130 def scangitpatch(lr, firstline):
1136 def scangitpatch(lr, firstline):
1131 """
1137 """
1132 Git patches can emit:
1138 Git patches can emit:
1133 - rename a to b
1139 - rename a to b
1134 - change b
1140 - change b
1135 - copy a to c
1141 - copy a to c
1136 - change c
1142 - change c
1137
1143
1138 We cannot apply this sequence as-is, the renamed 'a' could not be
1144 We cannot apply this sequence as-is, the renamed 'a' could not be
1139 found for it would have been renamed already. And we cannot copy
1145 found for it would have been renamed already. And we cannot copy
1140 from 'b' instead because 'b' would have been changed already. So
1146 from 'b' instead because 'b' would have been changed already. So
1141 we scan the git patch for copy and rename commands so we can
1147 we scan the git patch for copy and rename commands so we can
1142 perform the copies ahead of time.
1148 perform the copies ahead of time.
1143 """
1149 """
1144 pos = 0
1150 pos = 0
1145 try:
1151 try:
1146 pos = lr.fp.tell()
1152 pos = lr.fp.tell()
1147 fp = lr.fp
1153 fp = lr.fp
1148 except IOError:
1154 except IOError:
1149 fp = cStringIO.StringIO(lr.fp.read())
1155 fp = cStringIO.StringIO(lr.fp.read())
1150 gitlr = linereader(fp)
1156 gitlr = linereader(fp)
1151 gitlr.push(firstline)
1157 gitlr.push(firstline)
1152 gitpatches = readgitpatch(gitlr)
1158 gitpatches = readgitpatch(gitlr)
1153 fp.seek(pos)
1159 fp.seek(pos)
1154 return gitpatches
1160 return gitpatches
1155
1161
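The ordering problem the docstring describes looks like the fragment below (hypothetical file names, shown only to make the issue concrete): by the time the second 'diff --git' is reached, 'a' has already been renamed away, so its content has to be stashed before any hunk is applied.

# Hypothetical git patch illustrating why copies are performed ahead of time.
SAMPLE = '''diff --git a/a b/b
rename from a
rename to b
--- a/a
+++ b/b
@@ -1,1 +1,1 @@
-old
+new content in b
diff --git a/a b/c
copy from a
copy to c
--- a/a
+++ b/c
@@ -1,1 +1,1 @@
-old
+new content in c
'''
# scangitpatch() scans this whole text up front, so iterhunks() can emit the
# RENAME and COPY records before any hunk touches 'a'.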
1156 def iterhunks(fp):
1162 def iterhunks(fp):
1157 """Read a patch and yield the following events:
1163 """Read a patch and yield the following events:
1158 - ("file", afile, bfile, firsthunk): select a new target file.
1164 - ("file", afile, bfile, firsthunk): select a new target file.
1159 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1165 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1160 "file" event.
1166 "file" event.
1161 - ("git", gitchanges): current diff is in git format, gitchanges
1167 - ("git", gitchanges): current diff is in git format, gitchanges
1162 maps filenames to gitpatch records. Unique event.
1168 maps filenames to gitpatch records. Unique event.
1163 """
1169 """
1164 afile = ""
1170 afile = ""
1165 bfile = ""
1171 bfile = ""
1166 state = None
1172 state = None
1167 hunknum = 0
1173 hunknum = 0
1168 emitfile = newfile = False
1174 emitfile = newfile = False
1169 gitpatches = None
1175 gitpatches = None
1170
1176
1171 # our states
1177 # our states
1172 BFILE = 1
1178 BFILE = 1
1173 context = None
1179 context = None
1174 lr = linereader(fp)
1180 lr = linereader(fp)
1175
1181
1176 while True:
1182 while True:
1177 x = lr.readline()
1183 x = lr.readline()
1178 if not x:
1184 if not x:
1179 break
1185 break
1180 if state == BFILE and (
1186 if state == BFILE and (
1181 (not context and x[0] == '@')
1187 (not context and x[0] == '@')
1182 or (context is not False and x.startswith('***************'))
1188 or (context is not False and x.startswith('***************'))
1183 or x.startswith('GIT binary patch')):
1189 or x.startswith('GIT binary patch')):
1184 gp = None
1190 gp = None
1185 if (gitpatches and
1191 if (gitpatches and
1186 (gitpatches[-1][0] == afile or gitpatches[-1][1] == bfile)):
1192 (gitpatches[-1][0] == afile or gitpatches[-1][1] == bfile)):
1187 gp = gitpatches.pop()[2]
1193 gp = gitpatches.pop()[2]
1188 if x.startswith('GIT binary patch'):
1194 if x.startswith('GIT binary patch'):
1189 h = binhunk(lr)
1195 h = binhunk(lr)
1190 else:
1196 else:
1191 if context is None and x.startswith('***************'):
1197 if context is None and x.startswith('***************'):
1192 context = True
1198 context = True
1193 h = hunk(x, hunknum + 1, lr, context)
1199 h = hunk(x, hunknum + 1, lr, context)
1194 hunknum += 1
1200 hunknum += 1
1195 if emitfile:
1201 if emitfile:
1196 emitfile = False
1202 emitfile = False
1197 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1203 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1198 yield 'hunk', h
1204 yield 'hunk', h
1199 elif x.startswith('diff --git'):
1205 elif x.startswith('diff --git'):
1200 m = gitre.match(x)
1206 m = gitre.match(x)
1201 if not m:
1207 if not m:
1202 continue
1208 continue
1203 if not gitpatches:
1209 if not gitpatches:
1204 # scan whole input for git metadata
1210 # scan whole input for git metadata
1205 gitpatches = [('a/' + gp.path, 'b/' + gp.path, gp) for gp
1211 gitpatches = [('a/' + gp.path, 'b/' + gp.path, gp) for gp
1206 in scangitpatch(lr, x)]
1212 in scangitpatch(lr, x)]
1207 yield 'git', [g[2].copy() for g in gitpatches
1213 yield 'git', [g[2].copy() for g in gitpatches
1208 if g[2].op in ('COPY', 'RENAME')]
1214 if g[2].op in ('COPY', 'RENAME')]
1209 gitpatches.reverse()
1215 gitpatches.reverse()
1210 afile = 'a/' + m.group(1)
1216 afile = 'a/' + m.group(1)
1211 bfile = 'b/' + m.group(2)
1217 bfile = 'b/' + m.group(2)
1212 while afile != gitpatches[-1][0] and bfile != gitpatches[-1][1]:
1218 while afile != gitpatches[-1][0] and bfile != gitpatches[-1][1]:
1213 gp = gitpatches.pop()[2]
1219 gp = gitpatches.pop()[2]
1214 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1220 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1215 gp = gitpatches[-1][2]
1221 gp = gitpatches[-1][2]
1216 # copy/rename + modify should modify target, not source
1222 # copy/rename + modify should modify target, not source
1217 if gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD') or gp.mode:
1223 if gp.op in ('COPY', 'DELETE', 'RENAME', 'ADD') or gp.mode:
1218 afile = bfile
1224 afile = bfile
1219 newfile = True
1225 newfile = True
1220 elif x.startswith('---'):
1226 elif x.startswith('---'):
1221 # check for a unified diff
1227 # check for a unified diff
1222 l2 = lr.readline()
1228 l2 = lr.readline()
1223 if not l2.startswith('+++'):
1229 if not l2.startswith('+++'):
1224 lr.push(l2)
1230 lr.push(l2)
1225 continue
1231 continue
1226 newfile = True
1232 newfile = True
1227 context = False
1233 context = False
1228 afile = parsefilename(x)
1234 afile = parsefilename(x)
1229 bfile = parsefilename(l2)
1235 bfile = parsefilename(l2)
1230 elif x.startswith('***'):
1236 elif x.startswith('***'):
1231 # check for a context diff
1237 # check for a context diff
1232 l2 = lr.readline()
1238 l2 = lr.readline()
1233 if not l2.startswith('---'):
1239 if not l2.startswith('---'):
1234 lr.push(l2)
1240 lr.push(l2)
1235 continue
1241 continue
1236 l3 = lr.readline()
1242 l3 = lr.readline()
1237 lr.push(l3)
1243 lr.push(l3)
1238 if not l3.startswith("***************"):
1244 if not l3.startswith("***************"):
1239 lr.push(l2)
1245 lr.push(l2)
1240 continue
1246 continue
1241 newfile = True
1247 newfile = True
1242 context = True
1248 context = True
1243 afile = parsefilename(x)
1249 afile = parsefilename(x)
1244 bfile = parsefilename(l2)
1250 bfile = parsefilename(l2)
1245
1251
1246 if newfile:
1252 if newfile:
1247 newfile = False
1253 newfile = False
1248 emitfile = True
1254 emitfile = True
1249 state = BFILE
1255 state = BFILE
1250 hunknum = 0
1256 hunknum = 0
1251
1257
1252 while gitpatches:
1258 while gitpatches:
1253 gp = gitpatches.pop()[2]
1259 gp = gitpatches.pop()[2]
1254 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1260 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1255
1261
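A hedged sketch of driving iterhunks() by hand, independent of _applydiff() below. The sample diff text is made up; the point is the 'file'-then-'hunk' event protocol described in the docstring (assuming the module is importable as mercurial.patch).

# Illustration only.
import cStringIO
from mercurial.patch import iterhunks

SAMPLE = '''--- a/hello.txt
+++ b/hello.txt
@@ -1,1 +1,1 @@
-hello
+goodbye
'''

events = [state for state, values in iterhunks(cStringIO.StringIO(SAMPLE))]
assert events == ['file', 'hunk']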
1256 def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
1262 def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
1257 """Reads a patch from fp and tries to apply it.
1263 """Reads a patch from fp and tries to apply it.
1258
1264
1259 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1265 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1260 there was any fuzz.
1266 there was any fuzz.
1261
1267
1262 If 'eolmode' is 'strict', the patch content and patched file are
1268 If 'eolmode' is 'strict', the patch content and patched file are
1263 read in binary mode. Otherwise, line endings are ignored when
1269 read in binary mode. Otherwise, line endings are ignored when
1264 patching then normalized according to 'eolmode'.
1270 patching then normalized according to 'eolmode'.
1265 """
1271 """
1266 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1272 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1267 eolmode=eolmode)
1273 eolmode=eolmode)
1268
1274
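The return convention (0 clean, 1 fuzz, -1 rejects) is turned into an exception-or-boolean by patchbackend() further down; the hedged sketch below shows reading it directly. 'u', 'backend' and 'store' are assumed to be set up as in patchbackend(), 'fix.diff' is a hypothetical patch file, and PatchError and _ are the names defined in this module.

# Hedged sketch only: interpreting applydiff()'s return value.
fp = open('fix.diff', 'rb')           # hypothetical patch file
try:
    ret = applydiff(u, fp, backend, store, strip=1, eolmode='strict')
finally:
    fp.close()
if ret < 0:
    raise PatchError(_('patch failed to apply'))
fuzzed = ret > 0                      # True when any hunk needed fuzz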
1269 def _applydiff(ui, fp, patcher, backend, store, strip=1,
1275 def _applydiff(ui, fp, patcher, backend, store, strip=1,
1270 eolmode='strict'):
1276 eolmode='strict'):
1271
1277
1272 def pstrip(p):
1278 def pstrip(p):
1273 return pathstrip(p, strip - 1)[1]
1279 return pathstrip(p, strip - 1)[1]
1274
1280
1275 rejects = 0
1281 rejects = 0
1276 err = 0
1282 err = 0
1277 current_file = None
1283 current_file = None
1278
1284
1279 for state, values in iterhunks(fp):
1285 for state, values in iterhunks(fp):
1280 if state == 'hunk':
1286 if state == 'hunk':
1281 if not current_file:
1287 if not current_file:
1282 continue
1288 continue
1283 ret = current_file.apply(values)
1289 ret = current_file.apply(values)
1284 if ret > 0:
1290 if ret > 0:
1285 err = 1
1291 err = 1
1286 elif state == 'file':
1292 elif state == 'file':
1287 if current_file:
1293 if current_file:
1288 rejects += current_file.close()
1294 rejects += current_file.close()
1289 current_file = None
1295 current_file = None
1290 afile, bfile, first_hunk, gp = values
1296 afile, bfile, first_hunk, gp = values
1291 if gp:
1297 if gp:
1292 path = pstrip(gp.path)
1298 path = pstrip(gp.path)
1293 gp.path = pstrip(gp.path)
1299 gp.path = pstrip(gp.path)
1294 if gp.oldpath:
1300 if gp.oldpath:
1295 gp.oldpath = pstrip(gp.oldpath)
1301 gp.oldpath = pstrip(gp.oldpath)
1296 else:
1302 else:
1297 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1303 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1298 if gp.op == 'RENAME':
1304 if gp.op == 'RENAME':
1299 backend.unlink(gp.oldpath)
1305 backend.unlink(gp.oldpath)
1300 if not first_hunk:
1306 if not first_hunk:
1301 if gp.op == 'DELETE':
1307 if gp.op == 'DELETE':
1302 backend.unlink(gp.path)
1308 backend.unlink(gp.path)
1303 continue
1309 continue
1304 data, mode = None, None
1310 data, mode = None, None
1305 if gp.op in ('RENAME', 'COPY'):
1311 if gp.op in ('RENAME', 'COPY'):
1306 data, mode = store.getfile(gp.oldpath)[:2]
1312 data, mode = store.getfile(gp.oldpath)[:2]
1307 if gp.mode:
1313 if gp.mode:
1308 mode = gp.mode
1314 mode = gp.mode
1309 if gp.op == 'ADD':
1315 if gp.op == 'ADD':
1310 # Added files without content have no hunk and
1316 # Added files without content have no hunk and
1311 # must be created
1317 # must be created
1312 data = ''
1318 data = ''
1313 if data or mode:
1319 if data or mode:
1314 if (gp.op in ('ADD', 'RENAME', 'COPY')
1320 if (gp.op in ('ADD', 'RENAME', 'COPY')
1315 and backend.exists(gp.path)):
1321 and backend.exists(gp.path)):
1316 raise PatchError(_("cannot create %s: destination "
1322 raise PatchError(_("cannot create %s: destination "
1317 "already exists") % gp.path)
1323 "already exists") % gp.path)
1318 backend.setfile(gp.path, data, mode, gp.oldpath)
1324 backend.setfile(gp.path, data, mode, gp.oldpath)
1319 continue
1325 continue
1320 try:
1326 try:
1321 current_file = patcher(ui, gp, backend, store,
1327 current_file = patcher(ui, gp, backend, store,
1322 eolmode=eolmode)
1328 eolmode=eolmode)
1323 except PatchError, inst:
1329 except PatchError, inst:
1324 ui.warn(str(inst) + '\n')
1330 ui.warn(str(inst) + '\n')
1325 current_file = None
1331 current_file = None
1326 rejects += 1
1332 rejects += 1
1327 continue
1333 continue
1328 elif state == 'git':
1334 elif state == 'git':
1329 for gp in values:
1335 for gp in values:
1330 path = pstrip(gp.oldpath)
1336 path = pstrip(gp.oldpath)
1331 data, mode = backend.getfile(path)
1337 data, mode = backend.getfile(path)
1332 store.setfile(path, data, mode)
1338 store.setfile(path, data, mode)
1333 else:
1339 else:
1334 raise util.Abort(_('unsupported parser state: %s') % state)
1340 raise util.Abort(_('unsupported parser state: %s') % state)
1335
1341
1336 if current_file:
1342 if current_file:
1337 rejects += current_file.close()
1343 rejects += current_file.close()
1338
1344
1339 if rejects:
1345 if rejects:
1340 return -1
1346 return -1
1341 return err
1347 return err
1342
1348
1343 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1349 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1344 similarity):
1350 similarity):
1345 """use <patcher> to apply <patchname> to the working directory.
1351 """use <patcher> to apply <patchname> to the working directory.
1346 returns whether patch was applied with fuzz factor."""
1352 returns whether patch was applied with fuzz factor."""
1347
1353
1348 fuzz = False
1354 fuzz = False
1349 args = []
1355 args = []
1350 cwd = repo.root
1356 cwd = repo.root
1351 if cwd:
1357 if cwd:
1352 args.append('-d %s' % util.shellquote(cwd))
1358 args.append('-d %s' % util.shellquote(cwd))
1353 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1359 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1354 util.shellquote(patchname)))
1360 util.shellquote(patchname)))
1355 try:
1361 try:
1356 for line in fp:
1362 for line in fp:
1357 line = line.rstrip()
1363 line = line.rstrip()
1358 ui.note(line + '\n')
1364 ui.note(line + '\n')
1359 if line.startswith('patching file '):
1365 if line.startswith('patching file '):
1360 pf = util.parsepatchoutput(line)
1366 pf = util.parsepatchoutput(line)
1361 printed_file = False
1367 printed_file = False
1362 files.add(pf)
1368 files.add(pf)
1363 elif line.find('with fuzz') >= 0:
1369 elif line.find('with fuzz') >= 0:
1364 fuzz = True
1370 fuzz = True
1365 if not printed_file:
1371 if not printed_file:
1366 ui.warn(pf + '\n')
1372 ui.warn(pf + '\n')
1367 printed_file = True
1373 printed_file = True
1368 ui.warn(line + '\n')
1374 ui.warn(line + '\n')
1369 elif line.find('saving rejects to file') >= 0:
1375 elif line.find('saving rejects to file') >= 0:
1370 ui.warn(line + '\n')
1376 ui.warn(line + '\n')
1371 elif line.find('FAILED') >= 0:
1377 elif line.find('FAILED') >= 0:
1372 if not printed_file:
1378 if not printed_file:
1373 ui.warn(pf + '\n')
1379 ui.warn(pf + '\n')
1374 printed_file = True
1380 printed_file = True
1375 ui.warn(line + '\n')
1381 ui.warn(line + '\n')
1376 finally:
1382 finally:
1377 if files:
1383 if files:
1378 cfiles = list(files)
1384 cfiles = list(files)
1379 cwd = repo.getcwd()
1385 cwd = repo.getcwd()
1380 if cwd:
1386 if cwd:
1381 cfiles = [util.pathto(repo.root, cwd, f)
1387 cfiles = [util.pathto(repo.root, cwd, f)
1382 for f in cfiles]
1388 for f in cfiles]
1383 scmutil.addremove(repo, cfiles, similarity=similarity)
1389 scmutil.addremove(repo, cfiles, similarity=similarity)
1384 code = fp.close()
1390 code = fp.close()
1385 if code:
1391 if code:
1386 raise PatchError(_("patch command failed: %s") %
1392 raise PatchError(_("patch command failed: %s") %
1387 util.explainexit(code)[0])
1393 util.explainexit(code)[0])
1388 return fuzz
1394 return fuzz
1389
1395
1390 def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
1396 def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
1391 if files is None:
1397 if files is None:
1392 files = set()
1398 files = set()
1393 if eolmode is None:
1399 if eolmode is None:
1394 eolmode = ui.config('patch', 'eol', 'strict')
1400 eolmode = ui.config('patch', 'eol', 'strict')
1395 if eolmode.lower() not in eolmodes:
1401 if eolmode.lower() not in eolmodes:
1396 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1402 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1397 eolmode = eolmode.lower()
1403 eolmode = eolmode.lower()
1398
1404
1399 store = filestore()
1405 store = filestore()
1400 try:
1406 try:
1401 fp = open(patchobj, 'rb')
1407 fp = open(patchobj, 'rb')
1402 except TypeError:
1408 except TypeError:
1403 fp = patchobj
1409 fp = patchobj
1404 try:
1410 try:
1405 ret = applydiff(ui, fp, backend, store, strip=strip,
1411 ret = applydiff(ui, fp, backend, store, strip=strip,
1406 eolmode=eolmode)
1412 eolmode=eolmode)
1407 finally:
1413 finally:
1408 if fp != patchobj:
1414 if fp != patchobj:
1409 fp.close()
1415 fp.close()
1410 files.update(backend.close())
1416 files.update(backend.close())
1411 store.close()
1417 store.close()
1412 if ret < 0:
1418 if ret < 0:
1413 raise PatchError(_('patch failed to apply'))
1419 raise PatchError(_('patch failed to apply'))
1414 return ret > 0
1420 return ret > 0
1415
1421
1416 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1422 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1417 similarity=0):
1423 similarity=0):
1418 """use builtin patch to apply <patchobj> to the working directory.
1424 """use builtin patch to apply <patchobj> to the working directory.
1419 returns whether patch was applied with fuzz factor."""
1425 returns whether patch was applied with fuzz factor."""
1420 backend = workingbackend(ui, repo, similarity)
1426 backend = workingbackend(ui, repo, similarity)
1421 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1427 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1422
1428
1423 def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
1429 def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
1424 eolmode='strict'):
1430 eolmode='strict'):
1425 backend = repobackend(ui, repo, ctx, store)
1431 backend = repobackend(ui, repo, ctx, store)
1426 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1432 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1427
1433
1428 def makememctx(repo, parents, text, user, date, branch, files, store,
1434 def makememctx(repo, parents, text, user, date, branch, files, store,
1429 editor=None):
1435 editor=None):
1430 def getfilectx(repo, memctx, path):
1436 def getfilectx(repo, memctx, path):
1431 data, (islink, isexec), copied = store.getfile(path)
1437 data, (islink, isexec), copied = store.getfile(path)
1432 return context.memfilectx(path, data, islink=islink, isexec=isexec,
1438 return context.memfilectx(path, data, islink=islink, isexec=isexec,
1433 copied=copied)
1439 copied=copied)
1434 extra = {}
1440 extra = {}
1435 if branch:
1441 if branch:
1436 extra['branch'] = encoding.fromlocal(branch)
1442 extra['branch'] = encoding.fromlocal(branch)
1437 ctx = context.memctx(repo, parents, text, files, getfilectx, user,
1443 ctx = context.memctx(repo, parents, text, files, getfilectx, user,
1438 date, extra)
1444 date, extra)
1439 if editor:
1445 if editor:
1440 ctx._text = editor(repo, ctx, [])
1446 ctx._text = editor(repo, ctx, [])
1441 return ctx
1447 return ctx
1442
1448
1443 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1449 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1444 similarity=0):
1450 similarity=0):
1445 """Apply <patchname> to the working directory.
1451 """Apply <patchname> to the working directory.
1446
1452
1447 'eolmode' specifies how end of lines should be handled. It can be:
1453 'eolmode' specifies how end of lines should be handled. It can be:
1448 - 'strict': inputs are read in binary mode, EOLs are preserved
1454 - 'strict': inputs are read in binary mode, EOLs are preserved
1449 - 'crlf': EOLs are ignored when patching and reset to CRLF
1455 - 'crlf': EOLs are ignored when patching and reset to CRLF
1450 - 'lf': EOLs are ignored when patching and reset to LF
1456 - 'lf': EOLs are ignored when patching and reset to LF
1451 - None: get it from user settings, default to 'strict'
1457 - None: get it from user settings, default to 'strict'
1452 'eolmode' is ignored when using an external patcher program.
1458 'eolmode' is ignored when using an external patcher program.
1453
1459
1454 Returns whether patch was applied with fuzz factor.
1460 Returns whether patch was applied with fuzz factor.
1455 """
1461 """
1456 patcher = ui.config('ui', 'patch')
1462 patcher = ui.config('ui', 'patch')
1457 if files is None:
1463 if files is None:
1458 files = set()
1464 files = set()
1459 try:
1465 try:
1460 if patcher:
1466 if patcher:
1461 return _externalpatch(ui, repo, patcher, patchname, strip,
1467 return _externalpatch(ui, repo, patcher, patchname, strip,
1462 files, similarity)
1468 files, similarity)
1463 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1469 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1464 similarity)
1470 similarity)
1465 except PatchError, err:
1471 except PatchError, err:
1466 raise util.Abort(str(err))
1472 raise util.Abort(str(err))
1467
1473
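A hedged usage sketch of the top-level patch() entry point: 'fix.diff' is a hypothetical file name, and the ui/repo bootstrap shown is the conventional one for this era of the API rather than something this module mandates.

# Hedged sketch only.
from mercurial import hg, ui as uimod
from mercurial import patch as patchmod

u = uimod.ui()
repo = hg.repository(u, '.')          # repository in the current directory
fuzz = patchmod.patch(u, repo, 'fix.diff', strip=1, eolmode='strict')
if fuzz:
    u.warn('patch applied with fuzz\n')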
1468 def changedfiles(ui, repo, patchpath, strip=1):
1474 def changedfiles(ui, repo, patchpath, strip=1):
1469 backend = fsbackend(ui, repo.root)
1475 backend = fsbackend(ui, repo.root)
1470 fp = open(patchpath, 'rb')
1476 fp = open(patchpath, 'rb')
1471 try:
1477 try:
1472 changed = set()
1478 changed = set()
1473 for state, values in iterhunks(fp):
1479 for state, values in iterhunks(fp):
1474 if state == 'file':
1480 if state == 'file':
1475 afile, bfile, first_hunk, gp = values
1481 afile, bfile, first_hunk, gp = values
1476 if gp:
1482 if gp:
1477 gp.path = pathstrip(gp.path, strip - 1)[1]
1483 gp.path = pathstrip(gp.path, strip - 1)[1]
1478 if gp.oldpath:
1484 if gp.oldpath:
1479 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1485 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1480 else:
1486 else:
1481 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1487 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1482 changed.add(gp.path)
1488 changed.add(gp.path)
1483 if gp.op == 'RENAME':
1489 if gp.op == 'RENAME':
1484 changed.add(gp.oldpath)
1490 changed.add(gp.oldpath)
1485 elif state not in ('hunk', 'git'):
1491 elif state not in ('hunk', 'git'):
1486 raise util.Abort(_('unsupported parser state: %s') % state)
1492 raise util.Abort(_('unsupported parser state: %s') % state)
1487 return changed
1493 return changed
1488 finally:
1494 finally:
1489 fp.close()
1495 fp.close()
1490
1496
1491 def b85diff(to, tn):
1497 def b85diff(to, tn):
1492 '''print base85-encoded binary diff'''
1498 '''print base85-encoded binary diff'''
1493 def gitindex(text):
1499 def gitindex(text):
1494 if not text:
1500 if not text:
1495 return hex(nullid)
1501 return hex(nullid)
1496 l = len(text)
1502 l = len(text)
1497 s = util.sha1('blob %d\0' % l)
1503 s = util.sha1('blob %d\0' % l)
1498 s.update(text)
1504 s.update(text)
1499 return s.hexdigest()
1505 return s.hexdigest()
1500
1506
1501 def fmtline(line):
1507 def fmtline(line):
1502 l = len(line)
1508 l = len(line)
1503 if l <= 26:
1509 if l <= 26:
1504 l = chr(ord('A') + l - 1)
1510 l = chr(ord('A') + l - 1)
1505 else:
1511 else:
1506 l = chr(l - 26 + ord('a') - 1)
1512 l = chr(l - 26 + ord('a') - 1)
1507 return '%c%s\n' % (l, base85.b85encode(line, True))
1513 return '%c%s\n' % (l, base85.b85encode(line, True))
1508
1514
1509 def chunk(text, csize=52):
1515 def chunk(text, csize=52):
1510 l = len(text)
1516 l = len(text)
1511 i = 0
1517 i = 0
1512 while i < l:
1518 while i < l:
1513 yield text[i:i + csize]
1519 yield text[i:i + csize]
1514 i += csize
1520 i += csize
1515
1521
1516 tohash = gitindex(to)
1522 tohash = gitindex(to)
1517 tnhash = gitindex(tn)
1523 tnhash = gitindex(tn)
1518 if tohash == tnhash:
1524 if tohash == tnhash:
1519 return ""
1525 return ""
1520
1526
1521 # TODO: deltas
1527 # TODO: deltas
1522 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1528 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1523 (tohash, tnhash, len(tn))]
1529 (tohash, tnhash, len(tn))]
1524 for l in chunk(zlib.compress(tn)):
1530 for l in chunk(zlib.compress(tn)):
1525 ret.append(fmtline(l))
1531 ret.append(fmtline(l))
1526 ret.append('\n')
1532 ret.append('\n')
1527 return ''.join(ret)
1533 return ''.join(ret)
1528
1534
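An illustration-only use of b85diff() on two tiny byte strings (assuming the module is importable as mercurial.patch); as the TODO notes, the output is always a full 'literal' section, never a delta.

# Illustration only.
from mercurial.patch import b85diff

out = b85diff('old\0payload', 'new\0payload')
assert out.startswith('index ')
assert 'GIT binary patch' in out
assert b85diff('same', 'same') == ''   # identical blobs yield no output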
1529 class GitDiffRequired(Exception):
1535 class GitDiffRequired(Exception):
1530 pass
1536 pass
1531
1537
1532 def diffopts(ui, opts=None, untrusted=False, section='diff'):
1538 def diffopts(ui, opts=None, untrusted=False, section='diff'):
1533 def get(key, name=None, getter=ui.configbool):
1539 def get(key, name=None, getter=ui.configbool):
1534 return ((opts and opts.get(key)) or
1540 return ((opts and opts.get(key)) or
1535 getter(section, name or key, None, untrusted=untrusted))
1541 getter(section, name or key, None, untrusted=untrusted))
1536 return mdiff.diffopts(
1542 return mdiff.diffopts(
1537 text=opts and opts.get('text'),
1543 text=opts and opts.get('text'),
1538 git=get('git'),
1544 git=get('git'),
1539 nodates=get('nodates'),
1545 nodates=get('nodates'),
1540 showfunc=get('show_function', 'showfunc'),
1546 showfunc=get('show_function', 'showfunc'),
1541 ignorews=get('ignore_all_space', 'ignorews'),
1547 ignorews=get('ignore_all_space', 'ignorews'),
1542 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1548 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1543 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1549 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1544 context=get('unified', getter=ui.config))
1550 context=get('unified', getter=ui.config))
1545
1551
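A hedged sketch of how diffopts() resolves a setting: an explicit value in 'opts' wins, otherwise the [diff] config section is consulted. Because of the '(opts and opts.get(key)) or getter(...)' expression, a falsy explicit value (False, 0, '') also falls back to the config.

# Hedged sketch only.
from mercurial import ui as uimod
from mercurial import patch as patchmod

u = uimod.ui()
u.setconfig('diff', 'git', 'true')
assert patchmod.diffopts(u).git                  # picked up from [diff] config
dopts = patchmod.diffopts(u, {'unified': '5'})   # explicit option wins
# diffopts(u, {'git': False}).git is still True: falsy opts fall through
# to the config value because of the 'or' above.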
1546 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1552 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1547 losedatafn=None, prefix=''):
1553 losedatafn=None, prefix=''):
1548 '''yields diff of changes to files between two nodes, or node and
1554 '''yields diff of changes to files between two nodes, or node and
1549 working directory.
1555 working directory.
1550
1556
1551 if node1 is None, use first dirstate parent instead.
1557 if node1 is None, use first dirstate parent instead.
1552 if node2 is None, compare node1 with working directory.
1558 if node2 is None, compare node1 with working directory.
1553
1559
1554 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1560 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1555 every time some change cannot be represented with the current
1561 every time some change cannot be represented with the current
1556 patch format. Return False to upgrade to git patch format, True to
1562 patch format. Return False to upgrade to git patch format, True to
1557 accept the loss or raise an exception to abort the diff. It is
1563 accept the loss or raise an exception to abort the diff. It is
1558 called with the name of current file being diffed as 'fn'. If set
1564 called with the name of current file being diffed as 'fn'. If set
1559 to None, patches will always be upgraded to git format when
1565 to None, patches will always be upgraded to git format when
1560 necessary.
1566 necessary.
1561
1567
1562 prefix is a filename prefix that is prepended to all filenames on
1568 prefix is a filename prefix that is prepended to all filenames on
1563 display (used for subrepos).
1569 display (used for subrepos).
1564 '''
1570 '''
1565
1571
1566 if opts is None:
1572 if opts is None:
1567 opts = mdiff.defaultopts
1573 opts = mdiff.defaultopts
1568
1574
1569 if not node1 and not node2:
1575 if not node1 and not node2:
1570 node1 = repo.dirstate.p1()
1576 node1 = repo.dirstate.p1()
1571
1577
1572 def lrugetfilectx():
1578 def lrugetfilectx():
1573 cache = {}
1579 cache = {}
1574 order = []
1580 order = []
1575 def getfilectx(f, ctx):
1581 def getfilectx(f, ctx):
1576 fctx = ctx.filectx(f, filelog=cache.get(f))
1582 fctx = ctx.filectx(f, filelog=cache.get(f))
1577 if f not in cache:
1583 if f not in cache:
1578 if len(cache) > 20:
1584 if len(cache) > 20:
1579 del cache[order.pop(0)]
1585 del cache[order.pop(0)]
1580 cache[f] = fctx.filelog()
1586 cache[f] = fctx.filelog()
1581 else:
1587 else:
1582 order.remove(f)
1588 order.remove(f)
1583 order.append(f)
1589 order.append(f)
1584 return fctx
1590 return fctx
1585 return getfilectx
1591 return getfilectx
1586 getfilectx = lrugetfilectx()
1592 getfilectx = lrugetfilectx()
1587
1593
1588 ctx1 = repo[node1]
1594 ctx1 = repo[node1]
1589 ctx2 = repo[node2]
1595 ctx2 = repo[node2]
1590
1596
1591 if not changes:
1597 if not changes:
1592 changes = repo.status(ctx1, ctx2, match=match)
1598 changes = repo.status(ctx1, ctx2, match=match)
1593 modified, added, removed = changes[:3]
1599 modified, added, removed = changes[:3]
1594
1600
1595 if not modified and not added and not removed:
1601 if not modified and not added and not removed:
1596 return []
1602 return []
1597
1603
1598 revs = None
1604 revs = None
1599 if not repo.ui.quiet:
1605 if not repo.ui.quiet:
1600 hexfunc = repo.ui.debugflag and hex or short
1606 hexfunc = repo.ui.debugflag and hex or short
1601 revs = [hexfunc(node) for node in [node1, node2] if node]
1607 revs = [hexfunc(node) for node in [node1, node2] if node]
1602
1608
1603 copy = {}
1609 copy = {}
1604 if opts.git or opts.upgrade:
1610 if opts.git or opts.upgrade:
1605 copy = copies.pathcopies(ctx1, ctx2)
1611 copy = copies.pathcopies(ctx1, ctx2)
1606
1612
1607 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1613 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1608 modified, added, removed, copy, getfilectx, opts, losedata, prefix)
1614 modified, added, removed, copy, getfilectx, opts, losedata, prefix)
1609 if opts.upgrade and not opts.git:
1615 if opts.upgrade and not opts.git:
1610 try:
1616 try:
1611 def losedata(fn):
1617 def losedata(fn):
1612 if not losedatafn or not losedatafn(fn=fn):
1618 if not losedatafn or not losedatafn(fn=fn):
1613 raise GitDiffRequired()
1619 raise GitDiffRequired()
1614 # Buffer the whole output until we are sure it can be generated
1620 # Buffer the whole output until we are sure it can be generated
1615 return list(difffn(opts.copy(git=False), losedata))
1621 return list(difffn(opts.copy(git=False), losedata))
1616 except GitDiffRequired:
1622 except GitDiffRequired:
1617 return difffn(opts.copy(git=True), None)
1623 return difffn(opts.copy(git=True), None)
1618 else:
1624 else:
1619 return difffn(opts, None)
1625 return difffn(opts, None)
1620
1626
1621 def difflabel(func, *args, **kw):
1627 def difflabel(func, *args, **kw):
1622 '''yields 2-tuples of (output, label) based on the output of func()'''
1628 '''yields 2-tuples of (output, label) based on the output of func()'''
1623 headprefixes = [('diff', 'diff.diffline'),
1629 headprefixes = [('diff', 'diff.diffline'),
1624 ('copy', 'diff.extended'),
1630 ('copy', 'diff.extended'),
1625 ('rename', 'diff.extended'),
1631 ('rename', 'diff.extended'),
1626 ('old', 'diff.extended'),
1632 ('old', 'diff.extended'),
1627 ('new', 'diff.extended'),
1633 ('new', 'diff.extended'),
1628 ('deleted', 'diff.extended'),
1634 ('deleted', 'diff.extended'),
1629 ('---', 'diff.file_a'),
1635 ('---', 'diff.file_a'),
1630 ('+++', 'diff.file_b')]
1636 ('+++', 'diff.file_b')]
1631 textprefixes = [('@', 'diff.hunk'),
1637 textprefixes = [('@', 'diff.hunk'),
1632 ('-', 'diff.deleted'),
1638 ('-', 'diff.deleted'),
1633 ('+', 'diff.inserted')]
1639 ('+', 'diff.inserted')]
1634 head = False
1640 head = False
1635 for chunk in func(*args, **kw):
1641 for chunk in func(*args, **kw):
1636 lines = chunk.split('\n')
1642 lines = chunk.split('\n')
1637 for i, line in enumerate(lines):
1643 for i, line in enumerate(lines):
1638 if i != 0:
1644 if i != 0:
1639 yield ('\n', '')
1645 yield ('\n', '')
1640 if head:
1646 if head:
1641 if line.startswith('@'):
1647 if line.startswith('@'):
1642 head = False
1648 head = False
1643 else:
1649 else:
1644 if line and not line[0] in ' +-@\\':
1650 if line and not line[0] in ' +-@\\':
1645 head = True
1651 head = True
1646 stripline = line
1652 stripline = line
1647 if not head and line and line[0] in '+-':
1653 if not head and line and line[0] in '+-':
1648 # highlight trailing whitespace, but only in changed lines
1654 # highlight trailing whitespace, but only in changed lines
1649 stripline = line.rstrip()
1655 stripline = line.rstrip()
1650 prefixes = textprefixes
1656 prefixes = textprefixes
1651 if head:
1657 if head:
1652 prefixes = headprefixes
1658 prefixes = headprefixes
1653 for prefix, label in prefixes:
1659 for prefix, label in prefixes:
1654 if stripline.startswith(prefix):
1660 if stripline.startswith(prefix):
1655 yield (stripline, label)
1661 yield (stripline, label)
1656 break
1662 break
1657 else:
1663 else:
1658 yield (line, '')
1664 yield (line, '')
1659 if line != stripline:
1665 if line != stripline:
1660 yield (line[len(stripline):], 'diff.trailingwhitespace')
1666 yield (line[len(stripline):], 'diff.trailingwhitespace')
1661
1667
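An illustration-only run of difflabel() over a made-up generator, showing the labels it attaches (assuming the module is importable as mercurial.patch):

# Illustration only.
from mercurial.patch import difflabel

def fakediff():
    yield 'diff --git a/x b/x\n@@ -1 +1 @@\n-old\n+new \n'

labelled = list(difflabel(fakediff))
assert ('-old', 'diff.deleted') in labelled
assert ('+new', 'diff.inserted') in labelled
assert (' ', 'diff.trailingwhitespace') in labelled   # trailing blank on '+new '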
1662 def diffui(*args, **kw):
1668 def diffui(*args, **kw):
1663 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1669 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1664 return difflabel(diff, *args, **kw)
1670 return difflabel(diff, *args, **kw)
1665
1671
1666
1672
1667 def _addmodehdr(header, omode, nmode):
1673 def _addmodehdr(header, omode, nmode):
1668 if omode != nmode:
1674 if omode != nmode:
1669 header.append('old mode %s\n' % omode)
1675 header.append('old mode %s\n' % omode)
1670 header.append('new mode %s\n' % nmode)
1676 header.append('new mode %s\n' % nmode)
1671
1677
1672 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1678 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1673 copy, getfilectx, opts, losedatafn, prefix):
1679 copy, getfilectx, opts, losedatafn, prefix):
1674
1680
1675 def join(f):
1681 def join(f):
1676 return os.path.join(prefix, f)
1682 return os.path.join(prefix, f)
1677
1683
1678 date1 = util.datestr(ctx1.date())
1684 date1 = util.datestr(ctx1.date())
1679 man1 = ctx1.manifest()
1685 man1 = ctx1.manifest()
1680
1686
1681 gone = set()
1687 gone = set()
1682 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1688 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1683
1689
1684 copyto = dict([(v, k) for k, v in copy.items()])
1690 copyto = dict([(v, k) for k, v in copy.items()])
1685
1691
1686 if opts.git:
1692 if opts.git:
1687 revs = None
1693 revs = None
1688
1694
1689 for f in sorted(modified + added + removed):
1695 for f in sorted(modified + added + removed):
1690 to = None
1696 to = None
1691 tn = None
1697 tn = None
1692 dodiff = True
1698 dodiff = True
1693 header = []
1699 header = []
1694 if f in man1:
1700 if f in man1:
1695 to = getfilectx(f, ctx1).data()
1701 to = getfilectx(f, ctx1).data()
1696 if f not in removed:
1702 if f not in removed:
1697 tn = getfilectx(f, ctx2).data()
1703 tn = getfilectx(f, ctx2).data()
1698 a, b = f, f
1704 a, b = f, f
1699 if opts.git or losedatafn:
1705 if opts.git or losedatafn:
1700 if f in added:
1706 if f in added:
1701 mode = gitmode[ctx2.flags(f)]
1707 mode = gitmode[ctx2.flags(f)]
1702 if f in copy or f in copyto:
1708 if f in copy or f in copyto:
1703 if opts.git:
1709 if opts.git:
1704 if f in copy:
1710 if f in copy:
1705 a = copy[f]
1711 a = copy[f]
1706 else:
1712 else:
1707 a = copyto[f]
1713 a = copyto[f]
1708 omode = gitmode[man1.flags(a)]
1714 omode = gitmode[man1.flags(a)]
1709 _addmodehdr(header, omode, mode)
1715 _addmodehdr(header, omode, mode)
1710 if a in removed and a not in gone:
1716 if a in removed and a not in gone:
1711 op = 'rename'
1717 op = 'rename'
1712 gone.add(a)
1718 gone.add(a)
1713 else:
1719 else:
1714 op = 'copy'
1720 op = 'copy'
1715 header.append('%s from %s\n' % (op, join(a)))
1721 header.append('%s from %s\n' % (op, join(a)))
1716 header.append('%s to %s\n' % (op, join(f)))
1722 header.append('%s to %s\n' % (op, join(f)))
1717 to = getfilectx(a, ctx1).data()
1723 to = getfilectx(a, ctx1).data()
1718 else:
1724 else:
1719 losedatafn(f)
1725 losedatafn(f)
1720 else:
1726 else:
1721 if opts.git:
1727 if opts.git:
1722 header.append('new file mode %s\n' % mode)
1728 header.append('new file mode %s\n' % mode)
1723 elif ctx2.flags(f):
1729 elif ctx2.flags(f):
1724 losedatafn(f)
1730 losedatafn(f)
1725 # In theory, if tn was copied or renamed we should check
1731 # In theory, if tn was copied or renamed we should check
1726 # if the source is binary too but the copy record already
1732 # if the source is binary too but the copy record already
1727 # forces git mode.
1733 # forces git mode.
1728 if util.binary(tn):
1734 if util.binary(tn):
1729 if opts.git:
1735 if opts.git:
1730 dodiff = 'binary'
1736 dodiff = 'binary'
1731 else:
1737 else:
1732 losedatafn(f)
1738 losedatafn(f)
1733 if not opts.git and not tn:
1739 if not opts.git and not tn:
1734 # regular diffs cannot represent new empty file
1740 # regular diffs cannot represent new empty file
1735 losedatafn(f)
1741 losedatafn(f)
1736 elif f in removed:
1742 elif f in removed:
1737 if opts.git:
1743 if opts.git:
1738 # have we already reported a copy above?
1744 # have we already reported a copy above?
1739 if ((f in copy and copy[f] in added
1745 if ((f in copy and copy[f] in added
1740 and copyto[copy[f]] == f) or
1746 and copyto[copy[f]] == f) or
1741 (f in copyto and copyto[f] in added
1747 (f in copyto and copyto[f] in added
1742 and copy[copyto[f]] == f)):
1748 and copy[copyto[f]] == f)):
1743 dodiff = False
1749 dodiff = False
1744 else:
1750 else:
1745 header.append('deleted file mode %s\n' %
1751 header.append('deleted file mode %s\n' %
1746 gitmode[man1.flags(f)])
1752 gitmode[man1.flags(f)])
1747 elif not to or util.binary(to):
1753 elif not to or util.binary(to):
1748 # regular diffs cannot represent empty file deletion
1754 # regular diffs cannot represent empty file deletion
1749 losedatafn(f)
1755 losedatafn(f)
1750 else:
1756 else:
1751 oflag = man1.flags(f)
1757 oflag = man1.flags(f)
1752 nflag = ctx2.flags(f)
1758 nflag = ctx2.flags(f)
1753 binary = util.binary(to) or util.binary(tn)
1759 binary = util.binary(to) or util.binary(tn)
1754 if opts.git:
1760 if opts.git:
1755 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1761 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1756 if binary:
1762 if binary:
1757 dodiff = 'binary'
1763 dodiff = 'binary'
1758 elif binary or nflag != oflag:
1764 elif binary or nflag != oflag:
1759 losedatafn(f)
1765 losedatafn(f)
1760 if opts.git:
1766 if opts.git:
1761 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1767 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1762
1768
1763 if dodiff:
1769 if dodiff:
1764 if dodiff == 'binary':
1770 if dodiff == 'binary':
1765 text = b85diff(to, tn)
1771 text = b85diff(to, tn)
1766 else:
1772 else:
1767 text = mdiff.unidiff(to, date1,
1773 text = mdiff.unidiff(to, date1,
1768 # ctx2 date may be dynamic
1774 # ctx2 date may be dynamic
1769 tn, util.datestr(ctx2.date()),
1775 tn, util.datestr(ctx2.date()),
1770 join(a), join(b), revs, opts=opts)
1776 join(a), join(b), revs, opts=opts)
1771 if header and (text or len(header) > 1):
1777 if header and (text or len(header) > 1):
1772 yield ''.join(header)
1778 yield ''.join(header)
1773 if text:
1779 if text:
1774 yield text
1780 yield text
1775
1781
1776 def diffstatsum(stats):
1782 def diffstatsum(stats):
1777 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1783 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1778 for f, a, r, b in stats:
1784 for f, a, r, b in stats:
1779 maxfile = max(maxfile, encoding.colwidth(f))
1785 maxfile = max(maxfile, encoding.colwidth(f))
1780 maxtotal = max(maxtotal, a + r)
1786 maxtotal = max(maxtotal, a + r)
1781 addtotal += a
1787 addtotal += a
1782 removetotal += r
1788 removetotal += r
1783 binary = binary or b
1789 binary = binary or b
1784
1790
1785 return maxfile, maxtotal, addtotal, removetotal, binary
1791 return maxfile, maxtotal, addtotal, removetotal, binary
1786
1792
1787 def diffstatdata(lines):
1793 def diffstatdata(lines):
1788 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1794 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1789
1795
1790 results = []
1796 results = []
1791 filename, adds, removes, isbinary = None, 0, 0, False
1797 filename, adds, removes, isbinary = None, 0, 0, False
1792
1798
1793 def addresult():
1799 def addresult():
1794 if filename:
1800 if filename:
1795 results.append((filename, adds, removes, isbinary))
1801 results.append((filename, adds, removes, isbinary))
1796
1802
1797 for line in lines:
1803 for line in lines:
1798 if line.startswith('diff'):
1804 if line.startswith('diff'):
1799 addresult()
1805 addresult()
1800 # set numbers to 0 anyway when starting new file
1806 # set numbers to 0 anyway when starting new file
1801 adds, removes, isbinary = 0, 0, False
1807 adds, removes, isbinary = 0, 0, False
1802 if line.startswith('diff --git'):
1808 if line.startswith('diff --git'):
1803 filename = gitre.search(line).group(1)
1809 filename = gitre.search(line).group(1)
1804 elif line.startswith('diff -r'):
1810 elif line.startswith('diff -r'):
1805 # format: "diff -r ... -r ... filename"
1811 # format: "diff -r ... -r ... filename"
1806 filename = diffre.search(line).group(1)
1812 filename = diffre.search(line).group(1)
1807 elif line.startswith('+') and not line.startswith('+++ '):
1813 elif line.startswith('+') and not line.startswith('+++ '):
1808 adds += 1
1814 adds += 1
1809 elif line.startswith('-') and not line.startswith('--- '):
1815 elif line.startswith('-') and not line.startswith('--- '):
1810 removes += 1
1816 removes += 1
1811 elif (line.startswith('GIT binary patch') or
1817 elif (line.startswith('GIT binary patch') or
1812 line.startswith('Binary file')):
1818 line.startswith('Binary file')):
1813 isbinary = True
1819 isbinary = True
1814 addresult()
1820 addresult()
1815 return results
1821 return results
1816
1822
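An illustration-only run of diffstatdata() over a tiny git-style diff (assuming the module is importable as mercurial.patch); note that the '---'/'+++' header lines are deliberately not counted as removes/adds.

# Illustration only.
from mercurial.patch import diffstatdata

lines = ['diff --git a/foo.c b/foo.c\n',
         '--- a/foo.c\n',
         '+++ b/foo.c\n',
         '@@ -1,2 +1,2 @@\n',
         '-old line\n',
         '+new line\n',
         '+another new line\n']
assert diffstatdata(lines) == [('foo.c', 2, 1, False)]
# diffstat(lines) would render this as roughly ' foo.c | 3 ++-' plus a
# ' 1 files changed, 2 insertions(+), 1 deletions(-)' summary line.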
1817 def diffstat(lines, width=80, git=False):
1823 def diffstat(lines, width=80, git=False):
1818 output = []
1824 output = []
1819 stats = diffstatdata(lines)
1825 stats = diffstatdata(lines)
1820 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1826 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1821
1827
1822 countwidth = len(str(maxtotal))
1828 countwidth = len(str(maxtotal))
1823 if hasbinary and countwidth < 3:
1829 if hasbinary and countwidth < 3:
1824 countwidth = 3
1830 countwidth = 3
1825 graphwidth = width - countwidth - maxname - 6
1831 graphwidth = width - countwidth - maxname - 6
1826 if graphwidth < 10:
1832 if graphwidth < 10:
1827 graphwidth = 10
1833 graphwidth = 10
1828
1834
1829 def scale(i):
1835 def scale(i):
1830 if maxtotal <= graphwidth:
1836 if maxtotal <= graphwidth:
1831 return i
1837 return i
1832 # If diffstat runs out of room it doesn't print anything,
1838 # If diffstat runs out of room it doesn't print anything,
1833 # which isn't very useful, so always print at least one + or -
1839 # which isn't very useful, so always print at least one + or -
1834 # if there were at least some changes.
1840 # if there were at least some changes.
1835 return max(i * graphwidth // maxtotal, int(bool(i)))
1841 return max(i * graphwidth // maxtotal, int(bool(i)))
1836
1842
1837 for filename, adds, removes, isbinary in stats:
1843 for filename, adds, removes, isbinary in stats:
1838 if isbinary:
1844 if isbinary:
1839 count = 'Bin'
1845 count = 'Bin'
1840 else:
1846 else:
1841 count = adds + removes
1847 count = adds + removes
1842 pluses = '+' * scale(adds)
1848 pluses = '+' * scale(adds)
1843 minuses = '-' * scale(removes)
1849 minuses = '-' * scale(removes)
1844 output.append(' %s%s | %*s %s%s\n' %
1850 output.append(' %s%s | %*s %s%s\n' %
1845 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1851 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1846 countwidth, count, pluses, minuses))
1852 countwidth, count, pluses, minuses))
1847
1853
1848 if stats:
1854 if stats:
1849 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1855 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1850 % (len(stats), totaladds, totalremoves))
1856 % (len(stats), totaladds, totalremoves))
1851
1857
1852 return ''.join(output)
1858 return ''.join(output)
1853
1859
1854 def diffstatui(*args, **kw):
1860 def diffstatui(*args, **kw):
1855 '''like diffstat(), but yields 2-tuples of (output, label) for
1861 '''like diffstat(), but yields 2-tuples of (output, label) for
1856 ui.write()
1862 ui.write()
1857 '''
1863 '''
1858
1864
1859 for line in diffstat(*args, **kw).splitlines():
1865 for line in diffstat(*args, **kw).splitlines():
1860 if line and line[-1] in '+-':
1866 if line and line[-1] in '+-':
1861 name, graph = line.rsplit(' ', 1)
1867 name, graph = line.rsplit(' ', 1)
1862 yield (name + ' ', '')
1868 yield (name + ' ', '')
1863 m = re.search(r'\++', graph)
1869 m = re.search(r'\++', graph)
1864 if m:
1870 if m:
1865 yield (m.group(0), 'diffstat.inserted')
1871 yield (m.group(0), 'diffstat.inserted')
1866 m = re.search(r'-+', graph)
1872 m = re.search(r'-+', graph)
1867 if m:
1873 if m:
1868 yield (m.group(0), 'diffstat.deleted')
1874 yield (m.group(0), 'diffstat.deleted')
1869 else:
1875 else:
1870 yield (line, '')
1876 yield (line, '')
1871 yield ('\n', '')
1877 yield ('\n', '')