patch: clarify binary hunk parsing loop
Patrick Mezard
r16567:aef3d0d4 default
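
The change below touches only binhunk._read(), near the end of the listing: the scan for the 'literal <size>' header of a git binary hunk is folded into a single while True loop so the EOF check and the header check sit next to each other. As a minimal standalone sketch of that loop shape (the stub class and function names here are mine, not Mercurial's), assuming the same getline-style reader that appends raw lines to the hunk:

    # Illustrative sketch only; the real code lives in binhunk._read() below.
    class linereaderstub(object):
        def __init__(self, lines):
            self.lines = list(lines)
        def readline(self):
            return self.lines.pop(0) if self.lines else ''

    def findliteral(lr, hunk):
        while True:
            l = lr.readline()
            hunk.append(l)
            line = l.rstrip('\r\n')
            if not line:
                # EOF (or a blank line) before the header is an error
                raise ValueError('could not extract binary data')
            if line.startswith('literal '):
                return int(line[8:])

    hunk = []
    assert findliteral(linereaderstub(['index 12ab..34cd\n', 'literal 42\n']),
                       hunk) == 42
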
@@ -1,1886 +1,1887 @@
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email.Parser, os, errno, re
9 import cStringIO, email.Parser, os, errno, re
10 import tempfile, zlib, shutil
10 import tempfile, zlib, shutil
11
11
12 from i18n import _
12 from i18n import _
13 from node import hex, nullid, short
13 from node import hex, nullid, short
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
15 import context
15 import context
16
16
17 gitre = re.compile('diff --git a/(.*) b/(.*)')
17 gitre = re.compile('diff --git a/(.*) b/(.*)')
18
18
19 class PatchError(Exception):
19 class PatchError(Exception):
20 pass
20 pass
21
21
22
22
23 # public functions
23 # public functions
24
24
25 def split(stream):
25 def split(stream):
26 '''return an iterator of individual patches from a stream'''
26 '''return an iterator of individual patches from a stream'''
27 def isheader(line, inheader):
27 def isheader(line, inheader):
28 if inheader and line[0] in (' ', '\t'):
28 if inheader and line[0] in (' ', '\t'):
29 # continuation
29 # continuation
30 return True
30 return True
31 if line[0] in (' ', '-', '+'):
31 if line[0] in (' ', '-', '+'):
32 # diff line - don't check for header pattern in there
32 # diff line - don't check for header pattern in there
33 return False
33 return False
34 l = line.split(': ', 1)
34 l = line.split(': ', 1)
35 return len(l) == 2 and ' ' not in l[0]
35 return len(l) == 2 and ' ' not in l[0]
36
36
37 def chunk(lines):
37 def chunk(lines):
38 return cStringIO.StringIO(''.join(lines))
38 return cStringIO.StringIO(''.join(lines))
39
39
40 def hgsplit(stream, cur):
40 def hgsplit(stream, cur):
41 inheader = True
41 inheader = True
42
42
43 for line in stream:
43 for line in stream:
44 if not line.strip():
44 if not line.strip():
45 inheader = False
45 inheader = False
46 if not inheader and line.startswith('# HG changeset patch'):
46 if not inheader and line.startswith('# HG changeset patch'):
47 yield chunk(cur)
47 yield chunk(cur)
48 cur = []
48 cur = []
49 inheader = True
49 inheader = True
50
50
51 cur.append(line)
51 cur.append(line)
52
52
53 if cur:
53 if cur:
54 yield chunk(cur)
54 yield chunk(cur)
55
55
56 def mboxsplit(stream, cur):
56 def mboxsplit(stream, cur):
57 for line in stream:
57 for line in stream:
58 if line.startswith('From '):
58 if line.startswith('From '):
59 for c in split(chunk(cur[1:])):
59 for c in split(chunk(cur[1:])):
60 yield c
60 yield c
61 cur = []
61 cur = []
62
62
63 cur.append(line)
63 cur.append(line)
64
64
65 if cur:
65 if cur:
66 for c in split(chunk(cur[1:])):
66 for c in split(chunk(cur[1:])):
67 yield c
67 yield c
68
68
69 def mimesplit(stream, cur):
69 def mimesplit(stream, cur):
70 def msgfp(m):
70 def msgfp(m):
71 fp = cStringIO.StringIO()
71 fp = cStringIO.StringIO()
72 g = email.Generator.Generator(fp, mangle_from_=False)
72 g = email.Generator.Generator(fp, mangle_from_=False)
73 g.flatten(m)
73 g.flatten(m)
74 fp.seek(0)
74 fp.seek(0)
75 return fp
75 return fp
76
76
77 for line in stream:
77 for line in stream:
78 cur.append(line)
78 cur.append(line)
79 c = chunk(cur)
79 c = chunk(cur)
80
80
81 m = email.Parser.Parser().parse(c)
81 m = email.Parser.Parser().parse(c)
82 if not m.is_multipart():
82 if not m.is_multipart():
83 yield msgfp(m)
83 yield msgfp(m)
84 else:
84 else:
85 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
85 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
86 for part in m.walk():
86 for part in m.walk():
87 ct = part.get_content_type()
87 ct = part.get_content_type()
88 if ct not in ok_types:
88 if ct not in ok_types:
89 continue
89 continue
90 yield msgfp(part)
90 yield msgfp(part)
91
91
92 def headersplit(stream, cur):
92 def headersplit(stream, cur):
93 inheader = False
93 inheader = False
94
94
95 for line in stream:
95 for line in stream:
96 if not inheader and isheader(line, inheader):
96 if not inheader and isheader(line, inheader):
97 yield chunk(cur)
97 yield chunk(cur)
98 cur = []
98 cur = []
99 inheader = True
99 inheader = True
100 if inheader and not isheader(line, inheader):
100 if inheader and not isheader(line, inheader):
101 inheader = False
101 inheader = False
102
102
103 cur.append(line)
103 cur.append(line)
104
104
105 if cur:
105 if cur:
106 yield chunk(cur)
106 yield chunk(cur)
107
107
108 def remainder(cur):
108 def remainder(cur):
109 yield chunk(cur)
109 yield chunk(cur)
110
110
111 class fiter(object):
111 class fiter(object):
112 def __init__(self, fp):
112 def __init__(self, fp):
113 self.fp = fp
113 self.fp = fp
114
114
115 def __iter__(self):
115 def __iter__(self):
116 return self
116 return self
117
117
118 def next(self):
118 def next(self):
119 l = self.fp.readline()
119 l = self.fp.readline()
120 if not l:
120 if not l:
121 raise StopIteration
121 raise StopIteration
122 return l
122 return l
123
123
124 inheader = False
124 inheader = False
125 cur = []
125 cur = []
126
126
127 mimeheaders = ['content-type']
127 mimeheaders = ['content-type']
128
128
129 if not util.safehasattr(stream, 'next'):
129 if not util.safehasattr(stream, 'next'):
130 # http responses, for example, have readline but not next
130 # http responses, for example, have readline but not next
131 stream = fiter(stream)
131 stream = fiter(stream)
132
132
133 for line in stream:
133 for line in stream:
134 cur.append(line)
134 cur.append(line)
135 if line.startswith('# HG changeset patch'):
135 if line.startswith('# HG changeset patch'):
136 return hgsplit(stream, cur)
136 return hgsplit(stream, cur)
137 elif line.startswith('From '):
137 elif line.startswith('From '):
138 return mboxsplit(stream, cur)
138 return mboxsplit(stream, cur)
139 elif isheader(line, inheader):
139 elif isheader(line, inheader):
140 inheader = True
140 inheader = True
141 if line.split(':', 1)[0].lower() in mimeheaders:
141 if line.split(':', 1)[0].lower() in mimeheaders:
142 # let email parser handle this
142 # let email parser handle this
143 return mimesplit(stream, cur)
143 return mimesplit(stream, cur)
144 elif line.startswith('--- ') and inheader:
144 elif line.startswith('--- ') and inheader:
145 # No evil headers seen by diff start, split by hand
145 # No evil headers seen by diff start, split by hand
146 return headersplit(stream, cur)
146 return headersplit(stream, cur)
147 # Not enough info, keep reading
147 # Not enough info, keep reading
148
148
149 # if we are here, we have a very plain patch
149 # if we are here, we have a very plain patch
150 return remainder(cur)
150 return remainder(cur)
151
151
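A hedged usage sketch for split() as defined above (the sample text is made up): it accepts any file-like object or line iterator and yields one file-like chunk per patch.

    import cStringIO

    sample = cStringIO.StringIO(
        '# HG changeset patch\n'
        '# User alice\n'
        '\n'
        'patch one\n'
        '# HG changeset patch\n'
        '# User bob\n'
        '\n'
        'patch two\n')
    for i, c in enumerate(split(sample)):
        # prints '0 # User alice' then '1 # User bob'
        print i, c.read().splitlines()[1]
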
152 def extract(ui, fileobj):
152 def extract(ui, fileobj):
153 '''extract patch from data read from fileobj.
153 '''extract patch from data read from fileobj.
154
154
155 patch can be a normal patch or contained in an email message.
155 patch can be a normal patch or contained in an email message.
156
156
157 return tuple (filename, message, user, date, branch, node, p1, p2).
157 return tuple (filename, message, user, date, branch, node, p1, p2).
158 Any item in the returned tuple can be None. If filename is None,
158 Any item in the returned tuple can be None. If filename is None,
159 fileobj did not contain a patch. Caller must unlink filename when done.'''
159 fileobj did not contain a patch. Caller must unlink filename when done.'''
160
160
161 # attempt to detect the start of a patch
161 # attempt to detect the start of a patch
162 # (this heuristic is borrowed from quilt)
162 # (this heuristic is borrowed from quilt)
163 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
163 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
164 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
164 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
165 r'---[ \t].*?^\+\+\+[ \t]|'
165 r'---[ \t].*?^\+\+\+[ \t]|'
166 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
166 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
167
167
168 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
168 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
169 tmpfp = os.fdopen(fd, 'w')
169 tmpfp = os.fdopen(fd, 'w')
170 try:
170 try:
171 msg = email.Parser.Parser().parse(fileobj)
171 msg = email.Parser.Parser().parse(fileobj)
172
172
173 subject = msg['Subject']
173 subject = msg['Subject']
174 user = msg['From']
174 user = msg['From']
175 if not subject and not user:
175 if not subject and not user:
176 # Not an email, restore parsed headers if any
176 # Not an email, restore parsed headers if any
177 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
177 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
178
178
179 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
179 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
180 # should try to parse msg['Date']
180 # should try to parse msg['Date']
181 date = None
181 date = None
182 nodeid = None
182 nodeid = None
183 branch = None
183 branch = None
184 parents = []
184 parents = []
185
185
186 if subject:
186 if subject:
187 if subject.startswith('[PATCH'):
187 if subject.startswith('[PATCH'):
188 pend = subject.find(']')
188 pend = subject.find(']')
189 if pend >= 0:
189 if pend >= 0:
190 subject = subject[pend + 1:].lstrip()
190 subject = subject[pend + 1:].lstrip()
191 subject = re.sub(r'\n[ \t]+', ' ', subject)
191 subject = re.sub(r'\n[ \t]+', ' ', subject)
192 ui.debug('Subject: %s\n' % subject)
192 ui.debug('Subject: %s\n' % subject)
193 if user:
193 if user:
194 ui.debug('From: %s\n' % user)
194 ui.debug('From: %s\n' % user)
195 diffs_seen = 0
195 diffs_seen = 0
196 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
196 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
197 message = ''
197 message = ''
198 for part in msg.walk():
198 for part in msg.walk():
199 content_type = part.get_content_type()
199 content_type = part.get_content_type()
200 ui.debug('Content-Type: %s\n' % content_type)
200 ui.debug('Content-Type: %s\n' % content_type)
201 if content_type not in ok_types:
201 if content_type not in ok_types:
202 continue
202 continue
203 payload = part.get_payload(decode=True)
203 payload = part.get_payload(decode=True)
204 m = diffre.search(payload)
204 m = diffre.search(payload)
205 if m:
205 if m:
206 hgpatch = False
206 hgpatch = False
207 hgpatchheader = False
207 hgpatchheader = False
208 ignoretext = False
208 ignoretext = False
209
209
210 ui.debug('found patch at byte %d\n' % m.start(0))
210 ui.debug('found patch at byte %d\n' % m.start(0))
211 diffs_seen += 1
211 diffs_seen += 1
212 cfp = cStringIO.StringIO()
212 cfp = cStringIO.StringIO()
213 for line in payload[:m.start(0)].splitlines():
213 for line in payload[:m.start(0)].splitlines():
214 if line.startswith('# HG changeset patch') and not hgpatch:
214 if line.startswith('# HG changeset patch') and not hgpatch:
215 ui.debug('patch generated by hg export\n')
215 ui.debug('patch generated by hg export\n')
216 hgpatch = True
216 hgpatch = True
217 hgpatchheader = True
217 hgpatchheader = True
218 # drop earlier commit message content
218 # drop earlier commit message content
219 cfp.seek(0)
219 cfp.seek(0)
220 cfp.truncate()
220 cfp.truncate()
221 subject = None
221 subject = None
222 elif hgpatchheader:
222 elif hgpatchheader:
223 if line.startswith('# User '):
223 if line.startswith('# User '):
224 user = line[7:]
224 user = line[7:]
225 ui.debug('From: %s\n' % user)
225 ui.debug('From: %s\n' % user)
226 elif line.startswith("# Date "):
226 elif line.startswith("# Date "):
227 date = line[7:]
227 date = line[7:]
228 elif line.startswith("# Branch "):
228 elif line.startswith("# Branch "):
229 branch = line[9:]
229 branch = line[9:]
230 elif line.startswith("# Node ID "):
230 elif line.startswith("# Node ID "):
231 nodeid = line[10:]
231 nodeid = line[10:]
232 elif line.startswith("# Parent "):
232 elif line.startswith("# Parent "):
233 parents.append(line[9:].lstrip())
233 parents.append(line[9:].lstrip())
234 elif not line.startswith("# "):
234 elif not line.startswith("# "):
235 hgpatchheader = False
235 hgpatchheader = False
236 elif line == '---' and gitsendmail:
236 elif line == '---' and gitsendmail:
237 ignoretext = True
237 ignoretext = True
238 if not hgpatchheader and not ignoretext:
238 if not hgpatchheader and not ignoretext:
239 cfp.write(line)
239 cfp.write(line)
240 cfp.write('\n')
240 cfp.write('\n')
241 message = cfp.getvalue()
241 message = cfp.getvalue()
242 if tmpfp:
242 if tmpfp:
243 tmpfp.write(payload)
243 tmpfp.write(payload)
244 if not payload.endswith('\n'):
244 if not payload.endswith('\n'):
245 tmpfp.write('\n')
245 tmpfp.write('\n')
246 elif not diffs_seen and message and content_type == 'text/plain':
246 elif not diffs_seen and message and content_type == 'text/plain':
247 message += '\n' + payload
247 message += '\n' + payload
248 except:
248 except:
249 tmpfp.close()
249 tmpfp.close()
250 os.unlink(tmpname)
250 os.unlink(tmpname)
251 raise
251 raise
252
252
253 if subject and not message.startswith(subject):
253 if subject and not message.startswith(subject):
254 message = '%s\n%s' % (subject, message)
254 message = '%s\n%s' % (subject, message)
255 tmpfp.close()
255 tmpfp.close()
256 if not diffs_seen:
256 if not diffs_seen:
257 os.unlink(tmpname)
257 os.unlink(tmpname)
258 return None, message, user, date, branch, None, None, None
258 return None, message, user, date, branch, None, None, None
259 p1 = parents and parents.pop(0) or None
259 p1 = parents and parents.pop(0) or None
260 p2 = parents and parents.pop(0) or None
260 p2 = parents and parents.pop(0) or None
261 return tmpname, message, user, date, branch, nodeid, p1, p2
261 return tmpname, message, user, date, branch, nodeid, p1, p2
262
262
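A hedged sketch of how a caller might drive extract(); importmessage() is a made-up helper name and ui is assumed to be a Mercurial ui object. Per the docstring, the caller owns the temporary patch file and must unlink it.

    import os

    def importmessage(ui, fp):
        tmpname, message, user, date, branch, node, p1, p2 = extract(ui, fp)
        if tmpname is None:
            return None                  # fp did not contain a patch
        try:
            return message, user, date
        finally:
            os.unlink(tmpname)           # caller must clean up the temp file
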
263 class patchmeta(object):
263 class patchmeta(object):
264 """Patched file metadata
264 """Patched file metadata
265
265
266 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
266 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
267 or COPY. 'path' is patched file path. 'oldpath' is set to the
267 or COPY. 'path' is patched file path. 'oldpath' is set to the
268 origin file when 'op' is either COPY or RENAME, None otherwise. If
268 origin file when 'op' is either COPY or RENAME, None otherwise. If
269 file mode is changed, 'mode' is a tuple (islink, isexec) where
269 file mode is changed, 'mode' is a tuple (islink, isexec) where
270 'islink' is True if the file is a symlink and 'isexec' is True if
270 'islink' is True if the file is a symlink and 'isexec' is True if
271 the file is executable. Otherwise, 'mode' is None.
271 the file is executable. Otherwise, 'mode' is None.
272 """
272 """
273 def __init__(self, path):
273 def __init__(self, path):
274 self.path = path
274 self.path = path
275 self.oldpath = None
275 self.oldpath = None
276 self.mode = None
276 self.mode = None
277 self.op = 'MODIFY'
277 self.op = 'MODIFY'
278 self.binary = False
278 self.binary = False
279
279
280 def setmode(self, mode):
280 def setmode(self, mode):
281 islink = mode & 020000
281 islink = mode & 020000
282 isexec = mode & 0100
282 isexec = mode & 0100
283 self.mode = (islink, isexec)
283 self.mode = (islink, isexec)
284
284
285 def copy(self):
285 def copy(self):
286 other = patchmeta(self.path)
286 other = patchmeta(self.path)
287 other.oldpath = self.oldpath
287 other.oldpath = self.oldpath
288 other.mode = self.mode
288 other.mode = self.mode
289 other.op = self.op
289 other.op = self.op
290 other.binary = self.binary
290 other.binary = self.binary
291 return other
291 return other
292
292
293 def _ispatchinga(self, afile):
293 def _ispatchinga(self, afile):
294 if afile == '/dev/null':
294 if afile == '/dev/null':
295 return self.op == 'ADD'
295 return self.op == 'ADD'
296 return afile == 'a/' + (self.oldpath or self.path)
296 return afile == 'a/' + (self.oldpath or self.path)
297
297
298 def _ispatchingb(self, bfile):
298 def _ispatchingb(self, bfile):
299 if bfile == '/dev/null':
299 if bfile == '/dev/null':
300 return self.op == 'DELETE'
300 return self.op == 'DELETE'
301 return bfile == 'b/' + self.path
301 return bfile == 'b/' + self.path
302
302
303 def ispatching(self, afile, bfile):
303 def ispatching(self, afile, bfile):
304 return self._ispatchinga(afile) and self._ispatchingb(bfile)
304 return self._ispatchinga(afile) and self._ispatchingb(bfile)
305
305
306 def __repr__(self):
306 def __repr__(self):
307 return "<patchmeta %s %r>" % (self.op, self.path)
307 return "<patchmeta %s %r>" % (self.op, self.path)
308
308
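As an editorial illustration (not part of the module) of how patchmeta ties op, oldpath and path to the a/ and b/ names of a git diff, consider a rename:

    gp = patchmeta('newname')
    gp.op = 'RENAME'
    gp.oldpath = 'oldname'
    # the old name is expected under 'a/', the new name under 'b/'
    assert gp.ispatching('a/oldname', 'b/newname')
    assert not gp.ispatching('a/newname', 'b/newname')
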
309 def readgitpatch(lr):
309 def readgitpatch(lr):
310 """extract git-style metadata about patches from <patchname>"""
310 """extract git-style metadata about patches from <patchname>"""
311
311
312 # Filter patch for git information
312 # Filter patch for git information
313 gp = None
313 gp = None
314 gitpatches = []
314 gitpatches = []
315 for line in lr:
315 for line in lr:
316 line = line.rstrip(' \r\n')
316 line = line.rstrip(' \r\n')
317 if line.startswith('diff --git'):
317 if line.startswith('diff --git'):
318 m = gitre.match(line)
318 m = gitre.match(line)
319 if m:
319 if m:
320 if gp:
320 if gp:
321 gitpatches.append(gp)
321 gitpatches.append(gp)
322 dst = m.group(2)
322 dst = m.group(2)
323 gp = patchmeta(dst)
323 gp = patchmeta(dst)
324 elif gp:
324 elif gp:
325 if line.startswith('--- '):
325 if line.startswith('--- '):
326 gitpatches.append(gp)
326 gitpatches.append(gp)
327 gp = None
327 gp = None
328 continue
328 continue
329 if line.startswith('rename from '):
329 if line.startswith('rename from '):
330 gp.op = 'RENAME'
330 gp.op = 'RENAME'
331 gp.oldpath = line[12:]
331 gp.oldpath = line[12:]
332 elif line.startswith('rename to '):
332 elif line.startswith('rename to '):
333 gp.path = line[10:]
333 gp.path = line[10:]
334 elif line.startswith('copy from '):
334 elif line.startswith('copy from '):
335 gp.op = 'COPY'
335 gp.op = 'COPY'
336 gp.oldpath = line[10:]
336 gp.oldpath = line[10:]
337 elif line.startswith('copy to '):
337 elif line.startswith('copy to '):
338 gp.path = line[8:]
338 gp.path = line[8:]
339 elif line.startswith('deleted file'):
339 elif line.startswith('deleted file'):
340 gp.op = 'DELETE'
340 gp.op = 'DELETE'
341 elif line.startswith('new file mode '):
341 elif line.startswith('new file mode '):
342 gp.op = 'ADD'
342 gp.op = 'ADD'
343 gp.setmode(int(line[-6:], 8))
343 gp.setmode(int(line[-6:], 8))
344 elif line.startswith('new mode '):
344 elif line.startswith('new mode '):
345 gp.setmode(int(line[-6:], 8))
345 gp.setmode(int(line[-6:], 8))
346 elif line.startswith('GIT binary patch'):
346 elif line.startswith('GIT binary patch'):
347 gp.binary = True
347 gp.binary = True
348 if gp:
348 if gp:
349 gitpatches.append(gp)
349 gitpatches.append(gp)
350
350
351 return gitpatches
351 return gitpatches
352
352
353 class linereader(object):
353 class linereader(object):
354 # simple class to allow pushing lines back into the input stream
354 # simple class to allow pushing lines back into the input stream
355 def __init__(self, fp):
355 def __init__(self, fp):
356 self.fp = fp
356 self.fp = fp
357 self.buf = []
357 self.buf = []
358
358
359 def push(self, line):
359 def push(self, line):
360 if line is not None:
360 if line is not None:
361 self.buf.append(line)
361 self.buf.append(line)
362
362
363 def readline(self):
363 def readline(self):
364 if self.buf:
364 if self.buf:
365 l = self.buf[0]
365 l = self.buf[0]
366 del self.buf[0]
366 del self.buf[0]
367 return l
367 return l
368 return self.fp.readline()
368 return self.fp.readline()
369
369
370 def __iter__(self):
370 def __iter__(self):
371 while True:
371 while True:
372 l = self.readline()
372 l = self.readline()
373 if not l:
373 if not l:
374 break
374 break
375 yield l
375 yield l
376
376
377 class abstractbackend(object):
377 class abstractbackend(object):
378 def __init__(self, ui):
378 def __init__(self, ui):
379 self.ui = ui
379 self.ui = ui
380
380
381 def getfile(self, fname):
381 def getfile(self, fname):
382 """Return target file data and flags as a (data, (islink,
382 """Return target file data and flags as a (data, (islink,
383 isexec)) tuple.
383 isexec)) tuple.
384 """
384 """
385 raise NotImplementedError
385 raise NotImplementedError
386
386
387 def setfile(self, fname, data, mode, copysource):
387 def setfile(self, fname, data, mode, copysource):
388 """Write data to target file fname and set its mode. mode is a
388 """Write data to target file fname and set its mode. mode is a
389 (islink, isexec) tuple. If data is None, the file content should
389 (islink, isexec) tuple. If data is None, the file content should
390 be left unchanged. If the file is modified after being copied,
390 be left unchanged. If the file is modified after being copied,
391 copysource is set to the original file name.
391 copysource is set to the original file name.
392 """
392 """
393 raise NotImplementedError
393 raise NotImplementedError
394
394
395 def unlink(self, fname):
395 def unlink(self, fname):
396 """Unlink target file."""
396 """Unlink target file."""
397 raise NotImplementedError
397 raise NotImplementedError
398
398
399 def writerej(self, fname, failed, total, lines):
399 def writerej(self, fname, failed, total, lines):
400 """Write rejected lines for fname. total is the number of hunks
400 """Write rejected lines for fname. total is the number of hunks
401 which failed to apply and total the total number of hunks for this
401 which failed to apply and total the total number of hunks for this
402 files.
402 files.
403 """
403 """
404 pass
404 pass
405
405
406 def exists(self, fname):
406 def exists(self, fname):
407 raise NotImplementedError
407 raise NotImplementedError
408
408
409 class fsbackend(abstractbackend):
409 class fsbackend(abstractbackend):
410 def __init__(self, ui, basedir):
410 def __init__(self, ui, basedir):
411 super(fsbackend, self).__init__(ui)
411 super(fsbackend, self).__init__(ui)
412 self.opener = scmutil.opener(basedir)
412 self.opener = scmutil.opener(basedir)
413
413
414 def _join(self, f):
414 def _join(self, f):
415 return os.path.join(self.opener.base, f)
415 return os.path.join(self.opener.base, f)
416
416
417 def getfile(self, fname):
417 def getfile(self, fname):
418 path = self._join(fname)
418 path = self._join(fname)
419 if os.path.islink(path):
419 if os.path.islink(path):
420 return (os.readlink(path), (True, False))
420 return (os.readlink(path), (True, False))
421 isexec = False
421 isexec = False
422 try:
422 try:
423 isexec = os.lstat(path).st_mode & 0100 != 0
423 isexec = os.lstat(path).st_mode & 0100 != 0
424 except OSError, e:
424 except OSError, e:
425 if e.errno != errno.ENOENT:
425 if e.errno != errno.ENOENT:
426 raise
426 raise
427 return (self.opener.read(fname), (False, isexec))
427 return (self.opener.read(fname), (False, isexec))
428
428
429 def setfile(self, fname, data, mode, copysource):
429 def setfile(self, fname, data, mode, copysource):
430 islink, isexec = mode
430 islink, isexec = mode
431 if data is None:
431 if data is None:
432 util.setflags(self._join(fname), islink, isexec)
432 util.setflags(self._join(fname), islink, isexec)
433 return
433 return
434 if islink:
434 if islink:
435 self.opener.symlink(data, fname)
435 self.opener.symlink(data, fname)
436 else:
436 else:
437 self.opener.write(fname, data)
437 self.opener.write(fname, data)
438 if isexec:
438 if isexec:
439 util.setflags(self._join(fname), False, True)
439 util.setflags(self._join(fname), False, True)
440
440
441 def unlink(self, fname):
441 def unlink(self, fname):
442 try:
442 try:
443 util.unlinkpath(self._join(fname))
443 util.unlinkpath(self._join(fname))
444 except OSError, inst:
444 except OSError, inst:
445 if inst.errno != errno.ENOENT:
445 if inst.errno != errno.ENOENT:
446 raise
446 raise
447
447
448 def writerej(self, fname, failed, total, lines):
448 def writerej(self, fname, failed, total, lines):
449 fname = fname + ".rej"
449 fname = fname + ".rej"
450 self.ui.warn(
450 self.ui.warn(
451 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
451 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
452 (failed, total, fname))
452 (failed, total, fname))
453 fp = self.opener(fname, 'w')
453 fp = self.opener(fname, 'w')
454 fp.writelines(lines)
454 fp.writelines(lines)
455 fp.close()
455 fp.close()
456
456
457 def exists(self, fname):
457 def exists(self, fname):
458 return os.path.lexists(self._join(fname))
458 return os.path.lexists(self._join(fname))
459
459
460 class workingbackend(fsbackend):
460 class workingbackend(fsbackend):
461 def __init__(self, ui, repo, similarity):
461 def __init__(self, ui, repo, similarity):
462 super(workingbackend, self).__init__(ui, repo.root)
462 super(workingbackend, self).__init__(ui, repo.root)
463 self.repo = repo
463 self.repo = repo
464 self.similarity = similarity
464 self.similarity = similarity
465 self.removed = set()
465 self.removed = set()
466 self.changed = set()
466 self.changed = set()
467 self.copied = []
467 self.copied = []
468
468
469 def _checkknown(self, fname):
469 def _checkknown(self, fname):
470 if self.repo.dirstate[fname] == '?' and self.exists(fname):
470 if self.repo.dirstate[fname] == '?' and self.exists(fname):
471 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
471 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
472
472
473 def setfile(self, fname, data, mode, copysource):
473 def setfile(self, fname, data, mode, copysource):
474 self._checkknown(fname)
474 self._checkknown(fname)
475 super(workingbackend, self).setfile(fname, data, mode, copysource)
475 super(workingbackend, self).setfile(fname, data, mode, copysource)
476 if copysource is not None:
476 if copysource is not None:
477 self.copied.append((copysource, fname))
477 self.copied.append((copysource, fname))
478 self.changed.add(fname)
478 self.changed.add(fname)
479
479
480 def unlink(self, fname):
480 def unlink(self, fname):
481 self._checkknown(fname)
481 self._checkknown(fname)
482 super(workingbackend, self).unlink(fname)
482 super(workingbackend, self).unlink(fname)
483 self.removed.add(fname)
483 self.removed.add(fname)
484 self.changed.add(fname)
484 self.changed.add(fname)
485
485
486 def close(self):
486 def close(self):
487 wctx = self.repo[None]
487 wctx = self.repo[None]
488 addremoved = set(self.changed)
488 addremoved = set(self.changed)
489 for src, dst in self.copied:
489 for src, dst in self.copied:
490 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
490 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
491 if self.removed:
491 if self.removed:
492 wctx.forget(sorted(self.removed))
492 wctx.forget(sorted(self.removed))
493 for f in self.removed:
493 for f in self.removed:
494 if f not in self.repo.dirstate:
494 if f not in self.repo.dirstate:
495 # File was deleted and no longer belongs to the
495 # File was deleted and no longer belongs to the
496 # dirstate, it was probably marked added then
496 # dirstate, it was probably marked added then
497 # deleted, and should not be considered by
497 # deleted, and should not be considered by
498 # addremove().
498 # addremove().
499 addremoved.discard(f)
499 addremoved.discard(f)
500 if addremoved:
500 if addremoved:
501 cwd = self.repo.getcwd()
501 cwd = self.repo.getcwd()
502 if cwd:
502 if cwd:
503 addremoved = [util.pathto(self.repo.root, cwd, f)
503 addremoved = [util.pathto(self.repo.root, cwd, f)
504 for f in addremoved]
504 for f in addremoved]
505 scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
505 scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
506 return sorted(self.changed)
506 return sorted(self.changed)
507
507
508 class filestore(object):
508 class filestore(object):
509 def __init__(self, maxsize=None):
509 def __init__(self, maxsize=None):
510 self.opener = None
510 self.opener = None
511 self.files = {}
511 self.files = {}
512 self.created = 0
512 self.created = 0
513 self.maxsize = maxsize
513 self.maxsize = maxsize
514 if self.maxsize is None:
514 if self.maxsize is None:
515 self.maxsize = 4*(2**20)
515 self.maxsize = 4*(2**20)
516 self.size = 0
516 self.size = 0
517 self.data = {}
517 self.data = {}
518
518
519 def setfile(self, fname, data, mode, copied=None):
519 def setfile(self, fname, data, mode, copied=None):
520 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
520 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
521 self.data[fname] = (data, mode, copied)
521 self.data[fname] = (data, mode, copied)
522 self.size += len(data)
522 self.size += len(data)
523 else:
523 else:
524 if self.opener is None:
524 if self.opener is None:
525 root = tempfile.mkdtemp(prefix='hg-patch-')
525 root = tempfile.mkdtemp(prefix='hg-patch-')
526 self.opener = scmutil.opener(root)
526 self.opener = scmutil.opener(root)
527 # Avoid filename issues with these simple names
527 # Avoid filename issues with these simple names
528 fn = str(self.created)
528 fn = str(self.created)
529 self.opener.write(fn, data)
529 self.opener.write(fn, data)
530 self.created += 1
530 self.created += 1
531 self.files[fname] = (fn, mode, copied)
531 self.files[fname] = (fn, mode, copied)
532
532
533 def getfile(self, fname):
533 def getfile(self, fname):
534 if fname in self.data:
534 if fname in self.data:
535 return self.data[fname]
535 return self.data[fname]
536 if not self.opener or fname not in self.files:
536 if not self.opener or fname not in self.files:
537 raise IOError()
537 raise IOError()
538 fn, mode, copied = self.files[fname]
538 fn, mode, copied = self.files[fname]
539 return self.opener.read(fn), mode, copied
539 return self.opener.read(fn), mode, copied
540
540
541 def close(self):
541 def close(self):
542 if self.opener:
542 if self.opener:
543 shutil.rmtree(self.opener.base)
543 shutil.rmtree(self.opener.base)
544
544
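A brief, illustrative use of filestore (not part of the module): small payloads stay in memory, and anything that would push the store past maxsize is written under a temporary directory that close() removes.

    store = filestore(maxsize=10)
    store.setfile('small', 'abc', (False, False))    # kept in store.data
    store.setfile('big', 'x' * 64, (False, False))   # written under a tempdir
    data, mode, copied = store.getfile('big')
    assert data == 'x' * 64 and copied is None
    store.close()                                    # removes the tempdir
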
545 class repobackend(abstractbackend):
545 class repobackend(abstractbackend):
546 def __init__(self, ui, repo, ctx, store):
546 def __init__(self, ui, repo, ctx, store):
547 super(repobackend, self).__init__(ui)
547 super(repobackend, self).__init__(ui)
548 self.repo = repo
548 self.repo = repo
549 self.ctx = ctx
549 self.ctx = ctx
550 self.store = store
550 self.store = store
551 self.changed = set()
551 self.changed = set()
552 self.removed = set()
552 self.removed = set()
553 self.copied = {}
553 self.copied = {}
554
554
555 def _checkknown(self, fname):
555 def _checkknown(self, fname):
556 if fname not in self.ctx:
556 if fname not in self.ctx:
557 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
557 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
558
558
559 def getfile(self, fname):
559 def getfile(self, fname):
560 try:
560 try:
561 fctx = self.ctx[fname]
561 fctx = self.ctx[fname]
562 except error.LookupError:
562 except error.LookupError:
563 raise IOError()
563 raise IOError()
564 flags = fctx.flags()
564 flags = fctx.flags()
565 return fctx.data(), ('l' in flags, 'x' in flags)
565 return fctx.data(), ('l' in flags, 'x' in flags)
566
566
567 def setfile(self, fname, data, mode, copysource):
567 def setfile(self, fname, data, mode, copysource):
568 if copysource:
568 if copysource:
569 self._checkknown(copysource)
569 self._checkknown(copysource)
570 if data is None:
570 if data is None:
571 data = self.ctx[fname].data()
571 data = self.ctx[fname].data()
572 self.store.setfile(fname, data, mode, copysource)
572 self.store.setfile(fname, data, mode, copysource)
573 self.changed.add(fname)
573 self.changed.add(fname)
574 if copysource:
574 if copysource:
575 self.copied[fname] = copysource
575 self.copied[fname] = copysource
576
576
577 def unlink(self, fname):
577 def unlink(self, fname):
578 self._checkknown(fname)
578 self._checkknown(fname)
579 self.removed.add(fname)
579 self.removed.add(fname)
580
580
581 def exists(self, fname):
581 def exists(self, fname):
582 return fname in self.ctx
582 return fname in self.ctx
583
583
584 def close(self):
584 def close(self):
585 return self.changed | self.removed
585 return self.changed | self.removed
586
586
587 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
587 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
588 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
588 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
589 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
589 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
590 eolmodes = ['strict', 'crlf', 'lf', 'auto']
590 eolmodes = ['strict', 'crlf', 'lf', 'auto']
591
591
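As a quick illustration of the two hunk-header patterns defined just above (these assertions are editorial examples, not part of the module); groups omitted in the short '-start +start' form come back as None:

    assert unidesc.match('@@ -1,5 +1,6 @@').groups() == ('1', '5', '1', '6')
    assert unidesc.match('@@ -10 +10 @@').groups() == ('10', None, '10', None)
    assert contextdesc.match('*** 1,4 ****').groups() == ('1', '4')
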
592 class patchfile(object):
592 class patchfile(object):
593 def __init__(self, ui, gp, backend, store, eolmode='strict'):
593 def __init__(self, ui, gp, backend, store, eolmode='strict'):
594 self.fname = gp.path
594 self.fname = gp.path
595 self.eolmode = eolmode
595 self.eolmode = eolmode
596 self.eol = None
596 self.eol = None
597 self.backend = backend
597 self.backend = backend
598 self.ui = ui
598 self.ui = ui
599 self.lines = []
599 self.lines = []
600 self.exists = False
600 self.exists = False
601 self.missing = True
601 self.missing = True
602 self.mode = gp.mode
602 self.mode = gp.mode
603 self.copysource = gp.oldpath
603 self.copysource = gp.oldpath
604 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
604 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
605 self.remove = gp.op == 'DELETE'
605 self.remove = gp.op == 'DELETE'
606 try:
606 try:
607 if self.copysource is None:
607 if self.copysource is None:
608 data, mode = backend.getfile(self.fname)
608 data, mode = backend.getfile(self.fname)
609 self.exists = True
609 self.exists = True
610 else:
610 else:
611 data, mode = store.getfile(self.copysource)[:2]
611 data, mode = store.getfile(self.copysource)[:2]
612 self.exists = backend.exists(self.fname)
612 self.exists = backend.exists(self.fname)
613 self.missing = False
613 self.missing = False
614 if data:
614 if data:
615 self.lines = mdiff.splitnewlines(data)
615 self.lines = mdiff.splitnewlines(data)
616 if self.mode is None:
616 if self.mode is None:
617 self.mode = mode
617 self.mode = mode
618 if self.lines:
618 if self.lines:
619 # Normalize line endings
619 # Normalize line endings
620 if self.lines[0].endswith('\r\n'):
620 if self.lines[0].endswith('\r\n'):
621 self.eol = '\r\n'
621 self.eol = '\r\n'
622 elif self.lines[0].endswith('\n'):
622 elif self.lines[0].endswith('\n'):
623 self.eol = '\n'
623 self.eol = '\n'
624 if eolmode != 'strict':
624 if eolmode != 'strict':
625 nlines = []
625 nlines = []
626 for l in self.lines:
626 for l in self.lines:
627 if l.endswith('\r\n'):
627 if l.endswith('\r\n'):
628 l = l[:-2] + '\n'
628 l = l[:-2] + '\n'
629 nlines.append(l)
629 nlines.append(l)
630 self.lines = nlines
630 self.lines = nlines
631 except IOError:
631 except IOError:
632 if self.create:
632 if self.create:
633 self.missing = False
633 self.missing = False
634 if self.mode is None:
634 if self.mode is None:
635 self.mode = (False, False)
635 self.mode = (False, False)
636 if self.missing:
636 if self.missing:
637 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
637 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
638
638
639 self.hash = {}
639 self.hash = {}
640 self.dirty = 0
640 self.dirty = 0
641 self.offset = 0
641 self.offset = 0
642 self.skew = 0
642 self.skew = 0
643 self.rej = []
643 self.rej = []
644 self.fileprinted = False
644 self.fileprinted = False
645 self.printfile(False)
645 self.printfile(False)
646 self.hunks = 0
646 self.hunks = 0
647
647
648 def writelines(self, fname, lines, mode):
648 def writelines(self, fname, lines, mode):
649 if self.eolmode == 'auto':
649 if self.eolmode == 'auto':
650 eol = self.eol
650 eol = self.eol
651 elif self.eolmode == 'crlf':
651 elif self.eolmode == 'crlf':
652 eol = '\r\n'
652 eol = '\r\n'
653 else:
653 else:
654 eol = '\n'
654 eol = '\n'
655
655
656 if self.eolmode != 'strict' and eol and eol != '\n':
656 if self.eolmode != 'strict' and eol and eol != '\n':
657 rawlines = []
657 rawlines = []
658 for l in lines:
658 for l in lines:
659 if l and l[-1] == '\n':
659 if l and l[-1] == '\n':
660 l = l[:-1] + eol
660 l = l[:-1] + eol
661 rawlines.append(l)
661 rawlines.append(l)
662 lines = rawlines
662 lines = rawlines
663
663
664 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
664 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
665
665
666 def printfile(self, warn):
666 def printfile(self, warn):
667 if self.fileprinted:
667 if self.fileprinted:
668 return
668 return
669 if warn or self.ui.verbose:
669 if warn or self.ui.verbose:
670 self.fileprinted = True
670 self.fileprinted = True
671 s = _("patching file %s\n") % self.fname
671 s = _("patching file %s\n") % self.fname
672 if warn:
672 if warn:
673 self.ui.warn(s)
673 self.ui.warn(s)
674 else:
674 else:
675 self.ui.note(s)
675 self.ui.note(s)
676
676
677
677
678 def findlines(self, l, linenum):
678 def findlines(self, l, linenum):
679 # looks through the hash and finds candidate lines. The
679 # looks through the hash and finds candidate lines. The
680 # result is a list of line numbers sorted based on distance
680 # result is a list of line numbers sorted based on distance
681 # from linenum
681 # from linenum
682
682
683 cand = self.hash.get(l, [])
683 cand = self.hash.get(l, [])
684 if len(cand) > 1:
684 if len(cand) > 1:
685 # resort our list of potentials forward then back.
685 # resort our list of potentials forward then back.
686 cand.sort(key=lambda x: abs(x - linenum))
686 cand.sort(key=lambda x: abs(x - linenum))
687 return cand
687 return cand
688
688
689 def write_rej(self):
689 def write_rej(self):
690 # our rejects are a little different from patch(1). This always
690 # our rejects are a little different from patch(1). This always
691 # creates rejects in the same form as the original patch. A file
691 # creates rejects in the same form as the original patch. A file
692 # header is inserted so that you can run the reject through patch again
692 # header is inserted so that you can run the reject through patch again
693 # without having to type the filename.
693 # without having to type the filename.
694 if not self.rej:
694 if not self.rej:
695 return
695 return
696 base = os.path.basename(self.fname)
696 base = os.path.basename(self.fname)
697 lines = ["--- %s\n+++ %s\n" % (base, base)]
697 lines = ["--- %s\n+++ %s\n" % (base, base)]
698 for x in self.rej:
698 for x in self.rej:
699 for l in x.hunk:
699 for l in x.hunk:
700 lines.append(l)
700 lines.append(l)
701 if l[-1] != '\n':
701 if l[-1] != '\n':
702 lines.append("\n\ No newline at end of file\n")
702 lines.append("\n\ No newline at end of file\n")
703 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
703 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
704
704
705 def apply(self, h):
705 def apply(self, h):
706 if not h.complete():
706 if not h.complete():
707 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
707 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
708 (h.number, h.desc, len(h.a), h.lena, len(h.b),
708 (h.number, h.desc, len(h.a), h.lena, len(h.b),
709 h.lenb))
709 h.lenb))
710
710
711 self.hunks += 1
711 self.hunks += 1
712
712
713 if self.missing:
713 if self.missing:
714 self.rej.append(h)
714 self.rej.append(h)
715 return -1
715 return -1
716
716
717 if self.exists and self.create:
717 if self.exists and self.create:
718 if self.copysource:
718 if self.copysource:
719 self.ui.warn(_("cannot create %s: destination already "
719 self.ui.warn(_("cannot create %s: destination already "
720 "exists\n" % self.fname))
720 "exists\n" % self.fname))
721 else:
721 else:
722 self.ui.warn(_("file %s already exists\n") % self.fname)
722 self.ui.warn(_("file %s already exists\n") % self.fname)
723 self.rej.append(h)
723 self.rej.append(h)
724 return -1
724 return -1
725
725
726 if isinstance(h, binhunk):
726 if isinstance(h, binhunk):
727 if self.remove:
727 if self.remove:
728 self.backend.unlink(self.fname)
728 self.backend.unlink(self.fname)
729 else:
729 else:
730 self.lines[:] = h.new()
730 self.lines[:] = h.new()
731 self.offset += len(h.new())
731 self.offset += len(h.new())
732 self.dirty = True
732 self.dirty = True
733 return 0
733 return 0
734
734
735 horig = h
735 horig = h
736 if (self.eolmode in ('crlf', 'lf')
736 if (self.eolmode in ('crlf', 'lf')
737 or self.eolmode == 'auto' and self.eol):
737 or self.eolmode == 'auto' and self.eol):
738 # If new eols are going to be normalized, then normalize
738 # If new eols are going to be normalized, then normalize
739 # hunk data before patching. Otherwise, preserve input
739 # hunk data before patching. Otherwise, preserve input
740 # line-endings.
740 # line-endings.
741 h = h.getnormalized()
741 h = h.getnormalized()
742
742
743 # fast case first, no offsets, no fuzz
743 # fast case first, no offsets, no fuzz
744 old, oldstart, new, newstart = h.fuzzit(0, False)
744 old, oldstart, new, newstart = h.fuzzit(0, False)
745 oldstart += self.offset
745 oldstart += self.offset
746 orig_start = oldstart
746 orig_start = oldstart
747 # if there's skew we want to emit the "(offset %d lines)" even
747 # if there's skew we want to emit the "(offset %d lines)" even
748 # when the hunk cleanly applies at start + skew, so skip the
748 # when the hunk cleanly applies at start + skew, so skip the
749 # fast case code
749 # fast case code
750 if (self.skew == 0 and
750 if (self.skew == 0 and
751 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
751 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
752 if self.remove:
752 if self.remove:
753 self.backend.unlink(self.fname)
753 self.backend.unlink(self.fname)
754 else:
754 else:
755 self.lines[oldstart:oldstart + len(old)] = new
755 self.lines[oldstart:oldstart + len(old)] = new
756 self.offset += len(new) - len(old)
756 self.offset += len(new) - len(old)
757 self.dirty = True
757 self.dirty = True
758 return 0
758 return 0
759
759
760 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
760 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
761 self.hash = {}
761 self.hash = {}
762 for x, s in enumerate(self.lines):
762 for x, s in enumerate(self.lines):
763 self.hash.setdefault(s, []).append(x)
763 self.hash.setdefault(s, []).append(x)
764
764
765 for fuzzlen in xrange(3):
765 for fuzzlen in xrange(3):
766 for toponly in [True, False]:
766 for toponly in [True, False]:
767 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
767 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
768 oldstart = oldstart + self.offset + self.skew
768 oldstart = oldstart + self.offset + self.skew
769 oldstart = min(oldstart, len(self.lines))
769 oldstart = min(oldstart, len(self.lines))
770 if old:
770 if old:
771 cand = self.findlines(old[0][1:], oldstart)
771 cand = self.findlines(old[0][1:], oldstart)
772 else:
772 else:
773 # Only adding lines with no or fuzzed context, just
773 # Only adding lines with no or fuzzed context, just
774 # take the skew in account
774 # take the skew in account
775 cand = [oldstart]
775 cand = [oldstart]
776
776
777 for l in cand:
777 for l in cand:
778 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
778 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
779 self.lines[l : l + len(old)] = new
779 self.lines[l : l + len(old)] = new
780 self.offset += len(new) - len(old)
780 self.offset += len(new) - len(old)
781 self.skew = l - orig_start
781 self.skew = l - orig_start
782 self.dirty = True
782 self.dirty = True
783 offset = l - orig_start - fuzzlen
783 offset = l - orig_start - fuzzlen
784 if fuzzlen:
784 if fuzzlen:
785 msg = _("Hunk #%d succeeded at %d "
785 msg = _("Hunk #%d succeeded at %d "
786 "with fuzz %d "
786 "with fuzz %d "
787 "(offset %d lines).\n")
787 "(offset %d lines).\n")
788 self.printfile(True)
788 self.printfile(True)
789 self.ui.warn(msg %
789 self.ui.warn(msg %
790 (h.number, l + 1, fuzzlen, offset))
790 (h.number, l + 1, fuzzlen, offset))
791 else:
791 else:
792 msg = _("Hunk #%d succeeded at %d "
792 msg = _("Hunk #%d succeeded at %d "
793 "(offset %d lines).\n")
793 "(offset %d lines).\n")
794 self.ui.note(msg % (h.number, l + 1, offset))
794 self.ui.note(msg % (h.number, l + 1, offset))
795 return fuzzlen
795 return fuzzlen
796 self.printfile(True)
796 self.printfile(True)
797 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
797 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
798 self.rej.append(horig)
798 self.rej.append(horig)
799 return -1
799 return -1
800
800
801 def close(self):
801 def close(self):
802 if self.dirty:
802 if self.dirty:
803 self.writelines(self.fname, self.lines, self.mode)
803 self.writelines(self.fname, self.lines, self.mode)
804 self.write_rej()
804 self.write_rej()
805 return len(self.rej)
805 return len(self.rej)
806
806
807 class hunk(object):
807 class hunk(object):
808 def __init__(self, desc, num, lr, context):
808 def __init__(self, desc, num, lr, context):
809 self.number = num
809 self.number = num
810 self.desc = desc
810 self.desc = desc
811 self.hunk = [desc]
811 self.hunk = [desc]
812 self.a = []
812 self.a = []
813 self.b = []
813 self.b = []
814 self.starta = self.lena = None
814 self.starta = self.lena = None
815 self.startb = self.lenb = None
815 self.startb = self.lenb = None
816 if lr is not None:
816 if lr is not None:
817 if context:
817 if context:
818 self.read_context_hunk(lr)
818 self.read_context_hunk(lr)
819 else:
819 else:
820 self.read_unified_hunk(lr)
820 self.read_unified_hunk(lr)
821
821
822 def getnormalized(self):
822 def getnormalized(self):
823 """Return a copy with line endings normalized to LF."""
823 """Return a copy with line endings normalized to LF."""
824
824
825 def normalize(lines):
825 def normalize(lines):
826 nlines = []
826 nlines = []
827 for line in lines:
827 for line in lines:
828 if line.endswith('\r\n'):
828 if line.endswith('\r\n'):
829 line = line[:-2] + '\n'
829 line = line[:-2] + '\n'
830 nlines.append(line)
830 nlines.append(line)
831 return nlines
831 return nlines
832
832
833 # Dummy object, it is rebuilt manually
833 # Dummy object, it is rebuilt manually
834 nh = hunk(self.desc, self.number, None, None)
834 nh = hunk(self.desc, self.number, None, None)
835 nh.number = self.number
835 nh.number = self.number
836 nh.desc = self.desc
836 nh.desc = self.desc
837 nh.hunk = self.hunk
837 nh.hunk = self.hunk
838 nh.a = normalize(self.a)
838 nh.a = normalize(self.a)
839 nh.b = normalize(self.b)
839 nh.b = normalize(self.b)
840 nh.starta = self.starta
840 nh.starta = self.starta
841 nh.startb = self.startb
841 nh.startb = self.startb
842 nh.lena = self.lena
842 nh.lena = self.lena
843 nh.lenb = self.lenb
843 nh.lenb = self.lenb
844 return nh
844 return nh
845
845
846 def read_unified_hunk(self, lr):
846 def read_unified_hunk(self, lr):
847 m = unidesc.match(self.desc)
847 m = unidesc.match(self.desc)
848 if not m:
848 if not m:
849 raise PatchError(_("bad hunk #%d") % self.number)
849 raise PatchError(_("bad hunk #%d") % self.number)
850 self.starta, self.lena, self.startb, self.lenb = m.groups()
850 self.starta, self.lena, self.startb, self.lenb = m.groups()
851 if self.lena is None:
851 if self.lena is None:
852 self.lena = 1
852 self.lena = 1
853 else:
853 else:
854 self.lena = int(self.lena)
854 self.lena = int(self.lena)
855 if self.lenb is None:
855 if self.lenb is None:
856 self.lenb = 1
856 self.lenb = 1
857 else:
857 else:
858 self.lenb = int(self.lenb)
858 self.lenb = int(self.lenb)
859 self.starta = int(self.starta)
859 self.starta = int(self.starta)
860 self.startb = int(self.startb)
860 self.startb = int(self.startb)
861 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
861 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a, self.b)
862 # if we hit eof before finishing out the hunk, the last line will
862 # if we hit eof before finishing out the hunk, the last line will
863 # be zero length. Lets try to fix it up.
863 # be zero length. Lets try to fix it up.
864 while len(self.hunk[-1]) == 0:
864 while len(self.hunk[-1]) == 0:
865 del self.hunk[-1]
865 del self.hunk[-1]
866 del self.a[-1]
866 del self.a[-1]
867 del self.b[-1]
867 del self.b[-1]
868 self.lena -= 1
868 self.lena -= 1
869 self.lenb -= 1
869 self.lenb -= 1
870 self._fixnewline(lr)
870 self._fixnewline(lr)
871
871
872 def read_context_hunk(self, lr):
872 def read_context_hunk(self, lr):
873 self.desc = lr.readline()
873 self.desc = lr.readline()
874 m = contextdesc.match(self.desc)
874 m = contextdesc.match(self.desc)
875 if not m:
875 if not m:
876 raise PatchError(_("bad hunk #%d") % self.number)
876 raise PatchError(_("bad hunk #%d") % self.number)
877 self.starta, aend = m.groups()
877 self.starta, aend = m.groups()
878 self.starta = int(self.starta)
878 self.starta = int(self.starta)
879 if aend is None:
879 if aend is None:
880 aend = self.starta
880 aend = self.starta
881 self.lena = int(aend) - self.starta
881 self.lena = int(aend) - self.starta
882 if self.starta:
882 if self.starta:
883 self.lena += 1
883 self.lena += 1
884 for x in xrange(self.lena):
884 for x in xrange(self.lena):
885 l = lr.readline()
885 l = lr.readline()
886 if l.startswith('---'):
886 if l.startswith('---'):
887 # lines addition, old block is empty
887 # lines addition, old block is empty
888 lr.push(l)
888 lr.push(l)
889 break
889 break
890 s = l[2:]
890 s = l[2:]
891 if l.startswith('- ') or l.startswith('! '):
891 if l.startswith('- ') or l.startswith('! '):
892 u = '-' + s
892 u = '-' + s
893 elif l.startswith(' '):
893 elif l.startswith(' '):
894 u = ' ' + s
894 u = ' ' + s
895 else:
895 else:
896 raise PatchError(_("bad hunk #%d old text line %d") %
896 raise PatchError(_("bad hunk #%d old text line %d") %
897 (self.number, x))
897 (self.number, x))
898 self.a.append(u)
898 self.a.append(u)
899 self.hunk.append(u)
899 self.hunk.append(u)
900
900
901 l = lr.readline()
901 l = lr.readline()
902 if l.startswith('\ '):
902 if l.startswith('\ '):
903 s = self.a[-1][:-1]
903 s = self.a[-1][:-1]
904 self.a[-1] = s
904 self.a[-1] = s
905 self.hunk[-1] = s
905 self.hunk[-1] = s
906 l = lr.readline()
906 l = lr.readline()
907 m = contextdesc.match(l)
907 m = contextdesc.match(l)
908 if not m:
908 if not m:
909 raise PatchError(_("bad hunk #%d") % self.number)
909 raise PatchError(_("bad hunk #%d") % self.number)
910 self.startb, bend = m.groups()
910 self.startb, bend = m.groups()
911 self.startb = int(self.startb)
911 self.startb = int(self.startb)
912 if bend is None:
912 if bend is None:
913 bend = self.startb
913 bend = self.startb
914 self.lenb = int(bend) - self.startb
914 self.lenb = int(bend) - self.startb
915 if self.startb:
915 if self.startb:
916 self.lenb += 1
916 self.lenb += 1
917 hunki = 1
917 hunki = 1
918 for x in xrange(self.lenb):
918 for x in xrange(self.lenb):
919 l = lr.readline()
919 l = lr.readline()
920 if l.startswith('\ '):
920 if l.startswith('\ '):
921 # XXX: the only way to hit this is with an invalid line range.
921 # XXX: the only way to hit this is with an invalid line range.
922 # The no-eol marker is not counted in the line range, but I
922 # The no-eol marker is not counted in the line range, but I
923 # guess there are diff(1) out there which behave differently.
923 # guess there are diff(1) out there which behave differently.
924 s = self.b[-1][:-1]
924 s = self.b[-1][:-1]
925 self.b[-1] = s
925 self.b[-1] = s
926 self.hunk[hunki - 1] = s
926 self.hunk[hunki - 1] = s
927 continue
927 continue
928 if not l:
928 if not l:
929 # line deletions, new block is empty and we hit EOF
929 # line deletions, new block is empty and we hit EOF
930 lr.push(l)
930 lr.push(l)
931 break
931 break
932 s = l[2:]
932 s = l[2:]
933 if l.startswith('+ ') or l.startswith('! '):
933 if l.startswith('+ ') or l.startswith('! '):
934 u = '+' + s
934 u = '+' + s
935 elif l.startswith(' '):
935 elif l.startswith(' '):
936 u = ' ' + s
936 u = ' ' + s
937 elif len(self.b) == 0:
937 elif len(self.b) == 0:
938 # line deletions, new block is empty
938 # line deletions, new block is empty
939 lr.push(l)
939 lr.push(l)
940 break
940 break
941 else:
941 else:
942 raise PatchError(_("bad hunk #%d old text line %d") %
942 raise PatchError(_("bad hunk #%d old text line %d") %
943 (self.number, x))
943 (self.number, x))
944 self.b.append(s)
944 self.b.append(s)
945 while True:
945 while True:
946 if hunki >= len(self.hunk):
946 if hunki >= len(self.hunk):
947 h = ""
947 h = ""
948 else:
948 else:
949 h = self.hunk[hunki]
949 h = self.hunk[hunki]
950 hunki += 1
950 hunki += 1
951 if h == u:
951 if h == u:
952 break
952 break
953 elif h.startswith('-'):
953 elif h.startswith('-'):
954 continue
954 continue
955 else:
955 else:
956 self.hunk.insert(hunki - 1, u)
956 self.hunk.insert(hunki - 1, u)
957 break
957 break
958
958
959 if not self.a:
959 if not self.a:
960 # this happens when lines were only added to the hunk
960 # this happens when lines were only added to the hunk
961 for x in self.hunk:
961 for x in self.hunk:
962 if x.startswith('-') or x.startswith(' '):
962 if x.startswith('-') or x.startswith(' '):
963 self.a.append(x)
963 self.a.append(x)
964 if not self.b:
964 if not self.b:
965 # this happens when lines were only deleted from the hunk
965 # this happens when lines were only deleted from the hunk
966 for x in self.hunk:
966 for x in self.hunk:
967 if x.startswith('+') or x.startswith(' '):
967 if x.startswith('+') or x.startswith(' '):
968 self.b.append(x[1:])
968 self.b.append(x[1:])
969 # @@ -start,len +start,len @@
969 # @@ -start,len +start,len @@
970 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
970 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
971 self.startb, self.lenb)
971 self.startb, self.lenb)
972 self.hunk[0] = self.desc
972 self.hunk[0] = self.desc
973 self._fixnewline(lr)
973 self._fixnewline(lr)
974
974
975 def _fixnewline(self, lr):
975 def _fixnewline(self, lr):
976 l = lr.readline()
976 l = lr.readline()
977 if l.startswith('\ '):
977 if l.startswith('\ '):
978 diffhelpers.fix_newline(self.hunk, self.a, self.b)
978 diffhelpers.fix_newline(self.hunk, self.a, self.b)
979 else:
979 else:
980 lr.push(l)
980 lr.push(l)
981
981
982 def complete(self):
982 def complete(self):
983 return len(self.a) == self.lena and len(self.b) == self.lenb
983 return len(self.a) == self.lena and len(self.b) == self.lenb
984
984
985 def _fuzzit(self, old, new, fuzz, toponly):
985 def _fuzzit(self, old, new, fuzz, toponly):
986 # this removes context lines from the top and bottom of list 'l'. It
986 # this removes context lines from the top and bottom of list 'l'. It
987 # checks the hunk to make sure only context lines are removed, and then
987 # checks the hunk to make sure only context lines are removed, and then
988 # returns a new shortened list of lines.
988 # returns a new shortened list of lines.
989 fuzz = min(fuzz, len(old))
989 fuzz = min(fuzz, len(old))
990 if fuzz:
990 if fuzz:
991 top = 0
991 top = 0
992 bot = 0
992 bot = 0
993 hlen = len(self.hunk)
993 hlen = len(self.hunk)
994 for x in xrange(hlen - 1):
994 for x in xrange(hlen - 1):
995 # the hunk starts with the @@ line, so use x+1
995 # the hunk starts with the @@ line, so use x+1
996 if self.hunk[x + 1][0] == ' ':
996 if self.hunk[x + 1][0] == ' ':
997 top += 1
997 top += 1
998 else:
998 else:
999 break
999 break
1000 if not toponly:
1000 if not toponly:
1001 for x in xrange(hlen - 1):
1001 for x in xrange(hlen - 1):
1002 if self.hunk[hlen - bot - 1][0] == ' ':
1002 if self.hunk[hlen - bot - 1][0] == ' ':
1003 bot += 1
1003 bot += 1
1004 else:
1004 else:
1005 break
1005 break
1006
1006
1007 bot = min(fuzz, bot)
1007 bot = min(fuzz, bot)
1008 top = min(fuzz, top)
1008 top = min(fuzz, top)
1009 return old[top:len(old)-bot], new[top:len(new)-bot], top
1009 return old[top:len(old)-bot], new[top:len(new)-bot], top
1010 return old, new, 0
1010 return old, new, 0
1011
1011
1012 def fuzzit(self, fuzz, toponly):
1012 def fuzzit(self, fuzz, toponly):
1013 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1013 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1014 oldstart = self.starta + top
1014 oldstart = self.starta + top
1015 newstart = self.startb + top
1015 newstart = self.startb + top
1016 # zero length hunk ranges already have their start decremented
1016 # zero length hunk ranges already have their start decremented
1017 if self.lena:
1017 if self.lena:
1018 oldstart -= 1
1018 oldstart -= 1
1019 if self.lenb:
1019 if self.lenb:
1020 newstart -= 1
1020 newstart -= 1
1021 return old, oldstart, new, newstart
1021 return old, oldstart, new, newstart
1022
1022
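The context trimming done by _fuzzit above can be restated as a standalone sketch (a hypothetical helper, not part of this module, shown only to make the top/bottom counting explicit):

def trimmed_context(hunklines, fuzz, toponly=False):
    # hunklines[0] is the '@@ ... @@' header; only the body is inspected
    body = hunklines[1:]
    top = 0
    for l in body:
        if l.startswith(' '):
            top += 1
        else:
            break
    bot = 0
    if not toponly:
        for l in reversed(body):
            if l.startswith(' '):
                bot += 1
            else:
                break
    # at most 'fuzz' context lines may be dropped on either side
    return min(fuzz, top), min(fuzz, bot)

With fuzz=2 and a body of [' ctx', '-old', '+new', ' ctx'] this yields (1, 1), matching how _fuzzit then shortens old[top:len(old)-bot] and new[top:len(new)-bot].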
1023 class binhunk(object):
1023 class binhunk(object):
1024 'A binary patch file. Only understands literals so far.'
1024 'A binary patch file. Only understands literals so far.'
1025 def __init__(self, lr, fname):
1025 def __init__(self, lr, fname):
1026 self.text = None
1026 self.text = None
1027 self.hunk = ['GIT binary patch\n']
1027 self.hunk = ['GIT binary patch\n']
1028 self._fname = fname
1028 self._fname = fname
1029 self._read(lr)
1029 self._read(lr)
1030
1030
1031 def complete(self):
1031 def complete(self):
1032 return self.text is not None
1032 return self.text is not None
1033
1033
1034 def new(self):
1034 def new(self):
1035 return [self.text]
1035 return [self.text]
1036
1036
1037 def _read(self, lr):
1037 def _read(self, lr):
1038 def getline(lr, hunk):
1038 def getline(lr, hunk):
1039 l = lr.readline()
1039 l = lr.readline()
1040 hunk.append(l)
1040 hunk.append(l)
1041 return l.rstrip('\r\n')
1041 return l.rstrip('\r\n')
1042
1042
 1043 line = getline(lr, self.hunk)
 1044 while line and not line.startswith('literal '):
 1045 line = getline(lr, self.hunk)
 1046 if not line:
 1047 raise PatchError(_('could not extract "%s" binary data')
 1048 % self._fname)
 1043 while True:
 1044 line = getline(lr, self.hunk)
 1045 if not line:
 1046 raise PatchError(_('could not extract "%s" binary data')
 1047 % self._fname)
 1048 if line.startswith('literal '):
 1049 break
 1049 size = int(line[8:].rstrip())
 1050 size = int(line[8:].rstrip())
1050 dec = []
1051 dec = []
1051 line = getline(lr, self.hunk)
1052 line = getline(lr, self.hunk)
1052 while len(line) > 1:
1053 while len(line) > 1:
1053 l = line[0]
1054 l = line[0]
1054 if l <= 'Z' and l >= 'A':
1055 if l <= 'Z' and l >= 'A':
1055 l = ord(l) - ord('A') + 1
1056 l = ord(l) - ord('A') + 1
1056 else:
1057 else:
1057 l = ord(l) - ord('a') + 27
1058 l = ord(l) - ord('a') + 27
1058 try:
1059 try:
1059 dec.append(base85.b85decode(line[1:])[:l])
1060 dec.append(base85.b85decode(line[1:])[:l])
1060 except ValueError, e:
1061 except ValueError, e:
1061 raise PatchError(_('could not decode "%s" binary patch: %s')
1062 raise PatchError(_('could not decode "%s" binary patch: %s')
1062 % (self._fname, str(e)))
1063 % (self._fname, str(e)))
1063 line = getline(lr, self.hunk)
1064 line = getline(lr, self.hunk)
1064 text = zlib.decompress(''.join(dec))
1065 text = zlib.decompress(''.join(dec))
1065 if len(text) != size:
1066 if len(text) != size:
1066 raise PatchError(_('"%s" length is %d bytes, should be %d')
1067 raise PatchError(_('"%s" length is %d bytes, should be %d')
1067 % (self._fname, len(text), size))
1068 % (self._fname, len(text), size))
1068 self.text = text
1069 self.text = text
1069
1070
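For reference, the 'literal' payload parsed above stores zlib-compressed file data, base85-encoded in lines of at most 52 decoded bytes, where the first character of each line encodes that line's decoded length ('A'-'Z' for 1-26 bytes, 'a'-'z' for 27-52). A minimal round-trip sketch, assuming Mercurial's base85 module is importable:

import zlib
from mercurial import base85

data = zlib.compress('hello, world\n')      # what a 'literal' section carries
chunk = data[:52]                           # at most 52 decoded bytes per line
n = len(chunk)
lenchar = chr(ord('A') + n - 1) if n <= 26 else chr(ord('a') + n - 27)
line = '%c%s' % (lenchar, base85.b85encode(chunk, True))

# decoding mirrors the loop in binhunk._read above
c = line[0]
if 'A' <= c <= 'Z':
    declen = ord(c) - ord('A') + 1
else:
    declen = ord(c) - ord('a') + 27
assert base85.b85decode(line[1:])[:declen] == chunk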
1070 def parsefilename(str):
1071 def parsefilename(str):
1071 # --- filename \t|space stuff
1072 # --- filename \t|space stuff
1072 s = str[4:].rstrip('\r\n')
1073 s = str[4:].rstrip('\r\n')
1073 i = s.find('\t')
1074 i = s.find('\t')
1074 if i < 0:
1075 if i < 0:
1075 i = s.find(' ')
1076 i = s.find(' ')
1076 if i < 0:
1077 if i < 0:
1077 return s
1078 return s
1078 return s[:i]
1079 return s[:i]
1079
1080
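A few illustrative calls (the header lines are hypothetical):

parsefilename('--- a/foo.c\t2012-04-01 10:00:00\n')   # -> 'a/foo.c'
parsefilename('+++ b/foo.c Mon Apr  2 2012\n')        # -> 'b/foo.c'
parsefilename('--- a/foo.c\n')                        # -> 'a/foo.c' (no stamp)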
1080 def pathstrip(path, strip):
1081 def pathstrip(path, strip):
1081 pathlen = len(path)
1082 pathlen = len(path)
1082 i = 0
1083 i = 0
1083 if strip == 0:
1084 if strip == 0:
1084 return '', path.rstrip()
1085 return '', path.rstrip()
1085 count = strip
1086 count = strip
1086 while count > 0:
1087 while count > 0:
1087 i = path.find('/', i)
1088 i = path.find('/', i)
1088 if i == -1:
1089 if i == -1:
1089 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1090 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1090 (count, strip, path))
1091 (count, strip, path))
1091 i += 1
1092 i += 1
1092 # consume '//' in the path
1093 # consume '//' in the path
1093 while i < pathlen - 1 and path[i] == '/':
1094 while i < pathlen - 1 and path[i] == '/':
1094 i += 1
1095 i += 1
1095 count -= 1
1096 count -= 1
1096 return path[:i].lstrip(), path[i:].rstrip()
1097 return path[:i].lstrip(), path[i:].rstrip()
1097
1098
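Some illustrative calls (paths are hypothetical):

pathstrip('a/b/c.txt', 0)    # -> ('', 'a/b/c.txt')
pathstrip('a/b/c.txt', 1)    # -> ('a/', 'b/c.txt')
pathstrip('a//b/c.txt', 1)   # -> ('a//', 'b/c.txt'), doubled '/' is consumed
pathstrip('a/b/c.txt', 3)    # raises PatchError, not enough directories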
1098 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
1099 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
1099 nulla = afile_orig == "/dev/null"
1100 nulla = afile_orig == "/dev/null"
1100 nullb = bfile_orig == "/dev/null"
1101 nullb = bfile_orig == "/dev/null"
1101 create = nulla and hunk.starta == 0 and hunk.lena == 0
1102 create = nulla and hunk.starta == 0 and hunk.lena == 0
1102 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1103 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1103 abase, afile = pathstrip(afile_orig, strip)
1104 abase, afile = pathstrip(afile_orig, strip)
1104 gooda = not nulla and backend.exists(afile)
1105 gooda = not nulla and backend.exists(afile)
1105 bbase, bfile = pathstrip(bfile_orig, strip)
1106 bbase, bfile = pathstrip(bfile_orig, strip)
1106 if afile == bfile:
1107 if afile == bfile:
1107 goodb = gooda
1108 goodb = gooda
1108 else:
1109 else:
1109 goodb = not nullb and backend.exists(bfile)
1110 goodb = not nullb and backend.exists(bfile)
1110 missing = not goodb and not gooda and not create
1111 missing = not goodb and not gooda and not create
1111
1112
1112 # some diff programs apparently produce patches where the afile is
1113 # some diff programs apparently produce patches where the afile is
1113 # not /dev/null, but afile starts with bfile
1114 # not /dev/null, but afile starts with bfile
1114 abasedir = afile[:afile.rfind('/') + 1]
1115 abasedir = afile[:afile.rfind('/') + 1]
1115 bbasedir = bfile[:bfile.rfind('/') + 1]
1116 bbasedir = bfile[:bfile.rfind('/') + 1]
1116 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1117 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1117 and hunk.starta == 0 and hunk.lena == 0):
1118 and hunk.starta == 0 and hunk.lena == 0):
1118 create = True
1119 create = True
1119 missing = False
1120 missing = False
1120
1121
1121 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1122 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1122 # diff is between a file and its backup. In this case, the original
1123 # diff is between a file and its backup. In this case, the original
1123 # file should be patched (see original mpatch code).
1124 # file should be patched (see original mpatch code).
1124 isbackup = (abase == bbase and bfile.startswith(afile))
1125 isbackup = (abase == bbase and bfile.startswith(afile))
1125 fname = None
1126 fname = None
1126 if not missing:
1127 if not missing:
1127 if gooda and goodb:
1128 if gooda and goodb:
1128 fname = isbackup and afile or bfile
1129 fname = isbackup and afile or bfile
1129 elif gooda:
1130 elif gooda:
1130 fname = afile
1131 fname = afile
1131
1132
1132 if not fname:
1133 if not fname:
1133 if not nullb:
1134 if not nullb:
1134 fname = isbackup and afile or bfile
1135 fname = isbackup and afile or bfile
1135 elif not nulla:
1136 elif not nulla:
1136 fname = afile
1137 fname = afile
1137 else:
1138 else:
1138 raise PatchError(_("undefined source and destination files"))
1139 raise PatchError(_("undefined source and destination files"))
1139
1140
1140 gp = patchmeta(fname)
1141 gp = patchmeta(fname)
1141 if create:
1142 if create:
1142 gp.op = 'ADD'
1143 gp.op = 'ADD'
1143 elif remove:
1144 elif remove:
1144 gp.op = 'DELETE'
1145 gp.op = 'DELETE'
1145 return gp
1146 return gp
1146
1147
1147 def scangitpatch(lr, firstline):
1148 def scangitpatch(lr, firstline):
1148 """
1149 """
1149 Git patches can emit:
1150 Git patches can emit:
1150 - rename a to b
1151 - rename a to b
1151 - change b
1152 - change b
1152 - copy a to c
1153 - copy a to c
1153 - change c
1154 - change c
1154
1155
1155 We cannot apply this sequence as-is, the renamed 'a' could not be
1156 We cannot apply this sequence as-is, the renamed 'a' could not be
1156 found for it would have been renamed already. And we cannot copy
1157 found for it would have been renamed already. And we cannot copy
1157 from 'b' instead because 'b' would have been changed already. So
1158 from 'b' instead because 'b' would have been changed already. So
1158 we scan the git patch for copy and rename commands so we can
1159 we scan the git patch for copy and rename commands so we can
1159 perform the copies ahead of time.
1160 perform the copies ahead of time.
1160 """
1161 """
1161 pos = 0
1162 pos = 0
1162 try:
1163 try:
1163 pos = lr.fp.tell()
1164 pos = lr.fp.tell()
1164 fp = lr.fp
1165 fp = lr.fp
1165 except IOError:
1166 except IOError:
1166 fp = cStringIO.StringIO(lr.fp.read())
1167 fp = cStringIO.StringIO(lr.fp.read())
1167 gitlr = linereader(fp)
1168 gitlr = linereader(fp)
1168 gitlr.push(firstline)
1169 gitlr.push(firstline)
1169 gitpatches = readgitpatch(gitlr)
1170 gitpatches = readgitpatch(gitlr)
1170 fp.seek(pos)
1171 fp.seek(pos)
1171 return gitpatches
1172 return gitpatches
1172
1173
1173 def iterhunks(fp):
1174 def iterhunks(fp):
1174 """Read a patch and yield the following events:
1175 """Read a patch and yield the following events:
1175 - ("file", afile, bfile, firsthunk): select a new target file.
1176 - ("file", afile, bfile, firsthunk): select a new target file.
1176 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1177 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1177 "file" event.
1178 "file" event.
1178 - ("git", gitchanges): current diff is in git format, gitchanges
1179 - ("git", gitchanges): current diff is in git format, gitchanges
1179 maps filenames to gitpatch records. Unique event.
1180 maps filenames to gitpatch records. Unique event.
1180 """
1181 """
1181 afile = ""
1182 afile = ""
1182 bfile = ""
1183 bfile = ""
1183 state = None
1184 state = None
1184 hunknum = 0
1185 hunknum = 0
1185 emitfile = newfile = False
1186 emitfile = newfile = False
1186 gitpatches = None
1187 gitpatches = None
1187
1188
1188 # our states
1189 # our states
1189 BFILE = 1
1190 BFILE = 1
1190 context = None
1191 context = None
1191 lr = linereader(fp)
1192 lr = linereader(fp)
1192
1193
1193 while True:
1194 while True:
1194 x = lr.readline()
1195 x = lr.readline()
1195 if not x:
1196 if not x:
1196 break
1197 break
1197 if state == BFILE and (
1198 if state == BFILE and (
1198 (not context and x[0] == '@')
1199 (not context and x[0] == '@')
1199 or (context is not False and x.startswith('***************'))
1200 or (context is not False and x.startswith('***************'))
1200 or x.startswith('GIT binary patch')):
1201 or x.startswith('GIT binary patch')):
1201 gp = None
1202 gp = None
1202 if (gitpatches and
1203 if (gitpatches and
1203 gitpatches[-1].ispatching(afile, bfile)):
1204 gitpatches[-1].ispatching(afile, bfile)):
1204 gp = gitpatches.pop()
1205 gp = gitpatches.pop()
1205 if x.startswith('GIT binary patch'):
1206 if x.startswith('GIT binary patch'):
1206 h = binhunk(lr, gp.path)
1207 h = binhunk(lr, gp.path)
1207 else:
1208 else:
1208 if context is None and x.startswith('***************'):
1209 if context is None and x.startswith('***************'):
1209 context = True
1210 context = True
1210 h = hunk(x, hunknum + 1, lr, context)
1211 h = hunk(x, hunknum + 1, lr, context)
1211 hunknum += 1
1212 hunknum += 1
1212 if emitfile:
1213 if emitfile:
1213 emitfile = False
1214 emitfile = False
1214 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1215 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1215 yield 'hunk', h
1216 yield 'hunk', h
1216 elif x.startswith('diff --git'):
1217 elif x.startswith('diff --git'):
1217 m = gitre.match(x.rstrip(' \r\n'))
1218 m = gitre.match(x.rstrip(' \r\n'))
1218 if not m:
1219 if not m:
1219 continue
1220 continue
1220 if gitpatches is None:
1221 if gitpatches is None:
1221 # scan whole input for git metadata
1222 # scan whole input for git metadata
1222 gitpatches = scangitpatch(lr, x)
1223 gitpatches = scangitpatch(lr, x)
1223 yield 'git', [g.copy() for g in gitpatches
1224 yield 'git', [g.copy() for g in gitpatches
1224 if g.op in ('COPY', 'RENAME')]
1225 if g.op in ('COPY', 'RENAME')]
1225 gitpatches.reverse()
1226 gitpatches.reverse()
1226 afile = 'a/' + m.group(1)
1227 afile = 'a/' + m.group(1)
1227 bfile = 'b/' + m.group(2)
1228 bfile = 'b/' + m.group(2)
1228 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1229 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1229 gp = gitpatches.pop()
1230 gp = gitpatches.pop()
1230 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1231 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1231 if not gitpatches:
1232 if not gitpatches:
1232 raise PatchError(_('failed to synchronize metadata for "%s"')
1233 raise PatchError(_('failed to synchronize metadata for "%s"')
1233 % afile[2:])
1234 % afile[2:])
1234 gp = gitpatches[-1]
1235 gp = gitpatches[-1]
1235 newfile = True
1236 newfile = True
1236 elif x.startswith('---'):
1237 elif x.startswith('---'):
1237 # check for a unified diff
1238 # check for a unified diff
1238 l2 = lr.readline()
1239 l2 = lr.readline()
1239 if not l2.startswith('+++'):
1240 if not l2.startswith('+++'):
1240 lr.push(l2)
1241 lr.push(l2)
1241 continue
1242 continue
1242 newfile = True
1243 newfile = True
1243 context = False
1244 context = False
1244 afile = parsefilename(x)
1245 afile = parsefilename(x)
1245 bfile = parsefilename(l2)
1246 bfile = parsefilename(l2)
1246 elif x.startswith('***'):
1247 elif x.startswith('***'):
1247 # check for a context diff
1248 # check for a context diff
1248 l2 = lr.readline()
1249 l2 = lr.readline()
1249 if not l2.startswith('---'):
1250 if not l2.startswith('---'):
1250 lr.push(l2)
1251 lr.push(l2)
1251 continue
1252 continue
1252 l3 = lr.readline()
1253 l3 = lr.readline()
1253 lr.push(l3)
1254 lr.push(l3)
1254 if not l3.startswith("***************"):
1255 if not l3.startswith("***************"):
1255 lr.push(l2)
1256 lr.push(l2)
1256 continue
1257 continue
1257 newfile = True
1258 newfile = True
1258 context = True
1259 context = True
1259 afile = parsefilename(x)
1260 afile = parsefilename(x)
1260 bfile = parsefilename(l2)
1261 bfile = parsefilename(l2)
1261
1262
1262 if newfile:
1263 if newfile:
1263 newfile = False
1264 newfile = False
1264 emitfile = True
1265 emitfile = True
1265 state = BFILE
1266 state = BFILE
1266 hunknum = 0
1267 hunknum = 0
1267
1268
1268 while gitpatches:
1269 while gitpatches:
1269 gp = gitpatches.pop()
1270 gp = gitpatches.pop()
1270 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1271 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1271
1272
1272 def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
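A minimal consumer of the event stream described above, shown only as a sketch; the patch text is made up and the function is assumed to be imported from mercurial.patch:

import cStringIO
from mercurial.patch import iterhunks

patchtext = ('--- a/hello.txt\n'
             '+++ b/hello.txt\n'
             '@@ -1,1 +1,2 @@\n'
             ' hello\n'
             '+world\n')
for event, data in iterhunks(cStringIO.StringIO(patchtext)):
    if event == 'file':
        afile, bfile, firsthunk, gp = data
        print 'file %s -> %s' % (afile, bfile)
    elif event == 'hunk':
        print 'hunk', data.desc,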
1273 def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
1273 """Reads a patch from fp and tries to apply it.
1274 """Reads a patch from fp and tries to apply it.
1274
1275
1275 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1276 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1276 there was any fuzz.
1277 there was any fuzz.
1277
1278
1278 If 'eolmode' is 'strict', the patch content and patched file are
1279 If 'eolmode' is 'strict', the patch content and patched file are
1279 read in binary mode. Otherwise, line endings are ignored when
1280 read in binary mode. Otherwise, line endings are ignored when
1280 patching then normalized according to 'eolmode'.
1281 patching then normalized according to 'eolmode'.
1281 """
1282 """
1282 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1283 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1283 eolmode=eolmode)
1284 eolmode=eolmode)
1284
1285
1285 def _applydiff(ui, fp, patcher, backend, store, strip=1,
1286 def _applydiff(ui, fp, patcher, backend, store, strip=1,
1286 eolmode='strict'):
1287 eolmode='strict'):
1287
1288
1288 def pstrip(p):
1289 def pstrip(p):
1289 return pathstrip(p, strip - 1)[1]
1290 return pathstrip(p, strip - 1)[1]
1290
1291
1291 rejects = 0
1292 rejects = 0
1292 err = 0
1293 err = 0
1293 current_file = None
1294 current_file = None
1294
1295
1295 for state, values in iterhunks(fp):
1296 for state, values in iterhunks(fp):
1296 if state == 'hunk':
1297 if state == 'hunk':
1297 if not current_file:
1298 if not current_file:
1298 continue
1299 continue
1299 ret = current_file.apply(values)
1300 ret = current_file.apply(values)
1300 if ret > 0:
1301 if ret > 0:
1301 err = 1
1302 err = 1
1302 elif state == 'file':
1303 elif state == 'file':
1303 if current_file:
1304 if current_file:
1304 rejects += current_file.close()
1305 rejects += current_file.close()
1305 current_file = None
1306 current_file = None
1306 afile, bfile, first_hunk, gp = values
1307 afile, bfile, first_hunk, gp = values
1307 if gp:
1308 if gp:
1308 gp.path = pstrip(gp.path)
1309 gp.path = pstrip(gp.path)
1309 if gp.oldpath:
1310 if gp.oldpath:
1310 gp.oldpath = pstrip(gp.oldpath)
1311 gp.oldpath = pstrip(gp.oldpath)
1311 else:
1312 else:
1312 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1313 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1313 if gp.op == 'RENAME':
1314 if gp.op == 'RENAME':
1314 backend.unlink(gp.oldpath)
1315 backend.unlink(gp.oldpath)
1315 if not first_hunk:
1316 if not first_hunk:
1316 if gp.op == 'DELETE':
1317 if gp.op == 'DELETE':
1317 backend.unlink(gp.path)
1318 backend.unlink(gp.path)
1318 continue
1319 continue
1319 data, mode = None, None
1320 data, mode = None, None
1320 if gp.op in ('RENAME', 'COPY'):
1321 if gp.op in ('RENAME', 'COPY'):
1321 data, mode = store.getfile(gp.oldpath)[:2]
1322 data, mode = store.getfile(gp.oldpath)[:2]
1322 if gp.mode:
1323 if gp.mode:
1323 mode = gp.mode
1324 mode = gp.mode
1324 if gp.op == 'ADD':
1325 if gp.op == 'ADD':
1325 # Added files without content have no hunk and
1326 # Added files without content have no hunk and
1326 # must be created
1327 # must be created
1327 data = ''
1328 data = ''
1328 if data or mode:
1329 if data or mode:
1329 if (gp.op in ('ADD', 'RENAME', 'COPY')
1330 if (gp.op in ('ADD', 'RENAME', 'COPY')
1330 and backend.exists(gp.path)):
1331 and backend.exists(gp.path)):
1331 raise PatchError(_("cannot create %s: destination "
1332 raise PatchError(_("cannot create %s: destination "
1332 "already exists") % gp.path)
1333 "already exists") % gp.path)
1333 backend.setfile(gp.path, data, mode, gp.oldpath)
1334 backend.setfile(gp.path, data, mode, gp.oldpath)
1334 continue
1335 continue
1335 try:
1336 try:
1336 current_file = patcher(ui, gp, backend, store,
1337 current_file = patcher(ui, gp, backend, store,
1337 eolmode=eolmode)
1338 eolmode=eolmode)
1338 except PatchError, inst:
1339 except PatchError, inst:
1339 ui.warn(str(inst) + '\n')
1340 ui.warn(str(inst) + '\n')
1340 current_file = None
1341 current_file = None
1341 rejects += 1
1342 rejects += 1
1342 continue
1343 continue
1343 elif state == 'git':
1344 elif state == 'git':
1344 for gp in values:
1345 for gp in values:
1345 path = pstrip(gp.oldpath)
1346 path = pstrip(gp.oldpath)
1346 data, mode = backend.getfile(path)
1347 data, mode = backend.getfile(path)
1347 store.setfile(path, data, mode)
1348 store.setfile(path, data, mode)
1348 else:
1349 else:
1349 raise util.Abort(_('unsupported parser state: %s') % state)
1350 raise util.Abort(_('unsupported parser state: %s') % state)
1350
1351
1351 if current_file:
1352 if current_file:
1352 rejects += current_file.close()
1353 rejects += current_file.close()
1353
1354
1354 if rejects:
1355 if rejects:
1355 return -1
1356 return -1
1356 return err
1357 return err
1357
1358
1358 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1359 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1359 similarity):
1360 similarity):
1360 """use <patcher> to apply <patchname> to the working directory.
1361 """use <patcher> to apply <patchname> to the working directory.
1361 returns whether patch was applied with fuzz factor."""
1362 returns whether patch was applied with fuzz factor."""
1362
1363
1363 fuzz = False
1364 fuzz = False
1364 args = []
1365 args = []
1365 cwd = repo.root
1366 cwd = repo.root
1366 if cwd:
1367 if cwd:
1367 args.append('-d %s' % util.shellquote(cwd))
1368 args.append('-d %s' % util.shellquote(cwd))
1368 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1369 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1369 util.shellquote(patchname)))
1370 util.shellquote(patchname)))
1370 try:
1371 try:
1371 for line in fp:
1372 for line in fp:
1372 line = line.rstrip()
1373 line = line.rstrip()
1373 ui.note(line + '\n')
1374 ui.note(line + '\n')
1374 if line.startswith('patching file '):
1375 if line.startswith('patching file '):
1375 pf = util.parsepatchoutput(line)
1376 pf = util.parsepatchoutput(line)
1376 printed_file = False
1377 printed_file = False
1377 files.add(pf)
1378 files.add(pf)
1378 elif line.find('with fuzz') >= 0:
1379 elif line.find('with fuzz') >= 0:
1379 fuzz = True
1380 fuzz = True
1380 if not printed_file:
1381 if not printed_file:
1381 ui.warn(pf + '\n')
1382 ui.warn(pf + '\n')
1382 printed_file = True
1383 printed_file = True
1383 ui.warn(line + '\n')
1384 ui.warn(line + '\n')
1384 elif line.find('saving rejects to file') >= 0:
1385 elif line.find('saving rejects to file') >= 0:
1385 ui.warn(line + '\n')
1386 ui.warn(line + '\n')
1386 elif line.find('FAILED') >= 0:
1387 elif line.find('FAILED') >= 0:
1387 if not printed_file:
1388 if not printed_file:
1388 ui.warn(pf + '\n')
1389 ui.warn(pf + '\n')
1389 printed_file = True
1390 printed_file = True
1390 ui.warn(line + '\n')
1391 ui.warn(line + '\n')
1391 finally:
1392 finally:
1392 if files:
1393 if files:
1393 cfiles = list(files)
1394 cfiles = list(files)
1394 cwd = repo.getcwd()
1395 cwd = repo.getcwd()
1395 if cwd:
1396 if cwd:
1396 cfiles = [util.pathto(repo.root, cwd, f)
1397 cfiles = [util.pathto(repo.root, cwd, f)
1397 for f in cfiles]
1398 for f in cfiles]
1398 scmutil.addremove(repo, cfiles, similarity=similarity)
1399 scmutil.addremove(repo, cfiles, similarity=similarity)
1399 code = fp.close()
1400 code = fp.close()
1400 if code:
1401 if code:
1401 raise PatchError(_("patch command failed: %s") %
1402 raise PatchError(_("patch command failed: %s") %
1402 util.explainexit(code)[0])
1403 util.explainexit(code)[0])
1403 return fuzz
1404 return fuzz
1404
1405
1405 def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
1406 def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
1406 if files is None:
1407 if files is None:
1407 files = set()
1408 files = set()
1408 if eolmode is None:
1409 if eolmode is None:
1409 eolmode = ui.config('patch', 'eol', 'strict')
1410 eolmode = ui.config('patch', 'eol', 'strict')
1410 if eolmode.lower() not in eolmodes:
1411 if eolmode.lower() not in eolmodes:
1411 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1412 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1412 eolmode = eolmode.lower()
1413 eolmode = eolmode.lower()
1413
1414
1414 store = filestore()
1415 store = filestore()
1415 try:
1416 try:
1416 fp = open(patchobj, 'rb')
1417 fp = open(patchobj, 'rb')
1417 except TypeError:
1418 except TypeError:
1418 fp = patchobj
1419 fp = patchobj
1419 try:
1420 try:
1420 ret = applydiff(ui, fp, backend, store, strip=strip,
1421 ret = applydiff(ui, fp, backend, store, strip=strip,
1421 eolmode=eolmode)
1422 eolmode=eolmode)
1422 finally:
1423 finally:
1423 if fp != patchobj:
1424 if fp != patchobj:
1424 fp.close()
1425 fp.close()
1425 files.update(backend.close())
1426 files.update(backend.close())
1426 store.close()
1427 store.close()
1427 if ret < 0:
1428 if ret < 0:
1428 raise PatchError(_('patch failed to apply'))
1429 raise PatchError(_('patch failed to apply'))
1429 return ret > 0
1430 return ret > 0
1430
1431
1431 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1432 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1432 similarity=0):
1433 similarity=0):
1433 """use builtin patch to apply <patchobj> to the working directory.
1434 """use builtin patch to apply <patchobj> to the working directory.
1434 returns whether patch was applied with fuzz factor."""
1435 returns whether patch was applied with fuzz factor."""
1435 backend = workingbackend(ui, repo, similarity)
1436 backend = workingbackend(ui, repo, similarity)
1436 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1437 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1437
1438
1438 def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
1439 def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
1439 eolmode='strict'):
1440 eolmode='strict'):
1440 backend = repobackend(ui, repo, ctx, store)
1441 backend = repobackend(ui, repo, ctx, store)
1441 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1442 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1442
1443
1443 def makememctx(repo, parents, text, user, date, branch, files, store,
1444 def makememctx(repo, parents, text, user, date, branch, files, store,
1444 editor=None):
1445 editor=None):
1445 def getfilectx(repo, memctx, path):
1446 def getfilectx(repo, memctx, path):
1446 data, (islink, isexec), copied = store.getfile(path)
1447 data, (islink, isexec), copied = store.getfile(path)
1447 return context.memfilectx(path, data, islink=islink, isexec=isexec,
1448 return context.memfilectx(path, data, islink=islink, isexec=isexec,
1448 copied=copied)
1449 copied=copied)
1449 extra = {}
1450 extra = {}
1450 if branch:
1451 if branch:
1451 extra['branch'] = encoding.fromlocal(branch)
1452 extra['branch'] = encoding.fromlocal(branch)
1452 ctx = context.memctx(repo, parents, text, files, getfilectx, user,
1453 ctx = context.memctx(repo, parents, text, files, getfilectx, user,
1453 date, extra)
1454 date, extra)
1454 if editor:
1455 if editor:
1455 ctx._text = editor(repo, ctx, [])
1456 ctx._text = editor(repo, ctx, [])
1456 return ctx
1457 return ctx
1457
1458
1458 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1459 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1459 similarity=0):
1460 similarity=0):
1460 """Apply <patchname> to the working directory.
1461 """Apply <patchname> to the working directory.
1461
1462
1462 'eolmode' specifies how end of lines should be handled. It can be:
1463 'eolmode' specifies how end of lines should be handled. It can be:
1463 - 'strict': inputs are read in binary mode, EOLs are preserved
1464 - 'strict': inputs are read in binary mode, EOLs are preserved
1464 - 'crlf': EOLs are ignored when patching and reset to CRLF
1465 - 'crlf': EOLs are ignored when patching and reset to CRLF
1465 - 'lf': EOLs are ignored when patching and reset to LF
1466 - 'lf': EOLs are ignored when patching and reset to LF
1466 - None: get it from user settings, default to 'strict'
1467 - None: get it from user settings, default to 'strict'
1467 'eolmode' is ignored when using an external patcher program.
1468 'eolmode' is ignored when using an external patcher program.
1468
1469
1469 Returns whether patch was applied with fuzz factor.
1470 Returns whether patch was applied with fuzz factor.
1470 """
1471 """
1471 patcher = ui.config('ui', 'patch')
1472 patcher = ui.config('ui', 'patch')
1472 if files is None:
1473 if files is None:
1473 files = set()
1474 files = set()
1474 try:
1475 try:
1475 if patcher:
1476 if patcher:
1476 return _externalpatch(ui, repo, patcher, patchname, strip,
1477 return _externalpatch(ui, repo, patcher, patchname, strip,
1477 files, similarity)
1478 files, similarity)
1478 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1479 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1479 similarity)
1480 similarity)
1480 except PatchError, err:
1481 except PatchError, err:
1481 raise util.Abort(str(err))
1482 raise util.Abort(str(err))
1482
1483
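A hedged usage sketch: 'ui' and 'repo' are assumed to be a live ui/localrepo pair and 'fix.diff' a patch file on disk.

changed = set()
fuzz = patch(ui, repo, 'fix.diff', strip=1, files=changed, eolmode='lf')
if fuzz:
    ui.warn(_('patch applied with fuzz\n'))
ui.write('touched: %s\n' % ', '.join(sorted(changed)))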
1483 def changedfiles(ui, repo, patchpath, strip=1):
1484 def changedfiles(ui, repo, patchpath, strip=1):
1484 backend = fsbackend(ui, repo.root)
1485 backend = fsbackend(ui, repo.root)
1485 fp = open(patchpath, 'rb')
1486 fp = open(patchpath, 'rb')
1486 try:
1487 try:
1487 changed = set()
1488 changed = set()
1488 for state, values in iterhunks(fp):
1489 for state, values in iterhunks(fp):
1489 if state == 'file':
1490 if state == 'file':
1490 afile, bfile, first_hunk, gp = values
1491 afile, bfile, first_hunk, gp = values
1491 if gp:
1492 if gp:
1492 gp.path = pathstrip(gp.path, strip - 1)[1]
1493 gp.path = pathstrip(gp.path, strip - 1)[1]
1493 if gp.oldpath:
1494 if gp.oldpath:
1494 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1495 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1495 else:
1496 else:
1496 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1497 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1497 changed.add(gp.path)
1498 changed.add(gp.path)
1498 if gp.op == 'RENAME':
1499 if gp.op == 'RENAME':
1499 changed.add(gp.oldpath)
1500 changed.add(gp.oldpath)
1500 elif state not in ('hunk', 'git'):
1501 elif state not in ('hunk', 'git'):
1501 raise util.Abort(_('unsupported parser state: %s') % state)
1502 raise util.Abort(_('unsupported parser state: %s') % state)
1502 return changed
1503 return changed
1503 finally:
1504 finally:
1504 fp.close()
1505 fp.close()
1505
1506
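A quick way to preview which files a patch would touch without applying it (hypothetical file name, live ui/repo assumed):

for f in sorted(changedfiles(ui, repo, 'fix.diff', strip=1)):
    ui.write(f + '\n')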
1506 def b85diff(to, tn):
1507 def b85diff(to, tn):
1507 '''print base85-encoded binary diff'''
1508 '''print base85-encoded binary diff'''
1508 def gitindex(text):
1509 def gitindex(text):
1509 if not text:
1510 if not text:
1510 return hex(nullid)
1511 return hex(nullid)
1511 l = len(text)
1512 l = len(text)
1512 s = util.sha1('blob %d\0' % l)
1513 s = util.sha1('blob %d\0' % l)
1513 s.update(text)
1514 s.update(text)
1514 return s.hexdigest()
1515 return s.hexdigest()
1515
1516
1516 def fmtline(line):
1517 def fmtline(line):
1517 l = len(line)
1518 l = len(line)
1518 if l <= 26:
1519 if l <= 26:
1519 l = chr(ord('A') + l - 1)
1520 l = chr(ord('A') + l - 1)
1520 else:
1521 else:
1521 l = chr(l - 26 + ord('a') - 1)
1522 l = chr(l - 26 + ord('a') - 1)
1522 return '%c%s\n' % (l, base85.b85encode(line, True))
1523 return '%c%s\n' % (l, base85.b85encode(line, True))
1523
1524
1524 def chunk(text, csize=52):
1525 def chunk(text, csize=52):
1525 l = len(text)
1526 l = len(text)
1526 i = 0
1527 i = 0
1527 while i < l:
1528 while i < l:
1528 yield text[i:i + csize]
1529 yield text[i:i + csize]
1529 i += csize
1530 i += csize
1530
1531
1531 tohash = gitindex(to)
1532 tohash = gitindex(to)
1532 tnhash = gitindex(tn)
1533 tnhash = gitindex(tn)
1533 if tohash == tnhash:
1534 if tohash == tnhash:
1534 return ""
1535 return ""
1535
1536
1536 # TODO: deltas
1537 # TODO: deltas
1537 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1538 ret = ['index %s..%s\nGIT binary patch\nliteral %s\n' %
1538 (tohash, tnhash, len(tn))]
1539 (tohash, tnhash, len(tn))]
1539 for l in chunk(zlib.compress(tn)):
1540 for l in chunk(zlib.compress(tn)):
1540 ret.append(fmtline(l))
1541 ret.append(fmtline(l))
1541 ret.append('\n')
1542 ret.append('\n')
1542 return ''.join(ret)
1543 return ''.join(ret)
1543
1544
1544 class GitDiffRequired(Exception):
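b85diff() emits exactly the 'literal' format that binhunk above parses, so a generated binary hunk can be decoded again. A small round-trip sketch (the contents are made up; linereader and binhunk come from this module):

import cStringIO
old, new = '\x00old\x00', '\x00new binary contents\x00'
text = b85diff(old, new)
lines = text.splitlines(True)
# skip the 'index ...' and 'GIT binary patch' header lines, keep 'literal N'
lr = linereader(cStringIO.StringIO(''.join(lines[2:])))
assert binhunk(lr, 'dummy').new() == [new]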
1545 class GitDiffRequired(Exception):
1545 pass
1546 pass
1546
1547
1547 def diffopts(ui, opts=None, untrusted=False, section='diff'):
1548 def diffopts(ui, opts=None, untrusted=False, section='diff'):
1548 def get(key, name=None, getter=ui.configbool):
1549 def get(key, name=None, getter=ui.configbool):
1549 return ((opts and opts.get(key)) or
1550 return ((opts and opts.get(key)) or
1550 getter(section, name or key, None, untrusted=untrusted))
1551 getter(section, name or key, None, untrusted=untrusted))
1551 return mdiff.diffopts(
1552 return mdiff.diffopts(
1552 text=opts and opts.get('text'),
1553 text=opts and opts.get('text'),
1553 git=get('git'),
1554 git=get('git'),
1554 nodates=get('nodates'),
1555 nodates=get('nodates'),
1555 showfunc=get('show_function', 'showfunc'),
1556 showfunc=get('show_function', 'showfunc'),
1556 ignorews=get('ignore_all_space', 'ignorews'),
1557 ignorews=get('ignore_all_space', 'ignorews'),
1557 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1558 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1558 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1559 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1559 context=get('unified', getter=ui.config))
1560 context=get('unified', getter=ui.config))
1560
1561
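The option names map onto the [diff] section of hgrc; a hedged sketch of the correspondence (a live ui object is assumed):

opts = diffopts(ui, {'ignore_all_space': True, 'unified': 5})
assert opts.ignorews and opts.context == 5
# opts.git, opts.showfunc, etc. fall back to the [diff] config section,
# e.g. "git = true" or "showfunc = true" in hgrc.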
1561 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1562 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1562 losedatafn=None, prefix=''):
1563 losedatafn=None, prefix=''):
1563 '''yields diff of changes to files between two nodes, or node and
1564 '''yields diff of changes to files between two nodes, or node and
1564 working directory.
1565 working directory.
1565
1566
1566 if node1 is None, use first dirstate parent instead.
1567 if node1 is None, use first dirstate parent instead.
1567 if node2 is None, compare node1 with working directory.
1568 if node2 is None, compare node1 with working directory.
1568
1569
1569 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1570 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1570 every time some change cannot be represented with the current
1571 every time some change cannot be represented with the current
1571 patch format. Return False to upgrade to git patch format, True to
1572 patch format. Return False to upgrade to git patch format, True to
1572 accept the loss or raise an exception to abort the diff. It is
1573 accept the loss or raise an exception to abort the diff. It is
1573 called with the name of current file being diffed as 'fn'. If set
1574 called with the name of current file being diffed as 'fn'. If set
1574 to None, patches will always be upgraded to git format when
1575 to None, patches will always be upgraded to git format when
1575 necessary.
1576 necessary.
1576
1577
1577 prefix is a filename prefix that is prepended to all filenames on
1578 prefix is a filename prefix that is prepended to all filenames on
1578 display (used for subrepos).
1579 display (used for subrepos).
1579 '''
1580 '''
1580
1581
1581 if opts is None:
1582 if opts is None:
1582 opts = mdiff.defaultopts
1583 opts = mdiff.defaultopts
1583
1584
1584 if not node1 and not node2:
1585 if not node1 and not node2:
1585 node1 = repo.dirstate.p1()
1586 node1 = repo.dirstate.p1()
1586
1587
1587 def lrugetfilectx():
1588 def lrugetfilectx():
1588 cache = {}
1589 cache = {}
1589 order = []
1590 order = []
1590 def getfilectx(f, ctx):
1591 def getfilectx(f, ctx):
1591 fctx = ctx.filectx(f, filelog=cache.get(f))
1592 fctx = ctx.filectx(f, filelog=cache.get(f))
1592 if f not in cache:
1593 if f not in cache:
1593 if len(cache) > 20:
1594 if len(cache) > 20:
1594 del cache[order.pop(0)]
1595 del cache[order.pop(0)]
1595 cache[f] = fctx.filelog()
1596 cache[f] = fctx.filelog()
1596 else:
1597 else:
1597 order.remove(f)
1598 order.remove(f)
1598 order.append(f)
1599 order.append(f)
1599 return fctx
1600 return fctx
1600 return getfilectx
1601 return getfilectx
1601 getfilectx = lrugetfilectx()
1602 getfilectx = lrugetfilectx()
1602
1603
1603 ctx1 = repo[node1]
1604 ctx1 = repo[node1]
1604 ctx2 = repo[node2]
1605 ctx2 = repo[node2]
1605
1606
1606 if not changes:
1607 if not changes:
1607 changes = repo.status(ctx1, ctx2, match=match)
1608 changes = repo.status(ctx1, ctx2, match=match)
1608 modified, added, removed = changes[:3]
1609 modified, added, removed = changes[:3]
1609
1610
1610 if not modified and not added and not removed:
1611 if not modified and not added and not removed:
1611 return []
1612 return []
1612
1613
1613 revs = None
1614 revs = None
1614 if not repo.ui.quiet:
1615 if not repo.ui.quiet:
1615 hexfunc = repo.ui.debugflag and hex or short
1616 hexfunc = repo.ui.debugflag and hex or short
1616 revs = [hexfunc(node) for node in [node1, node2] if node]
1617 revs = [hexfunc(node) for node in [node1, node2] if node]
1617
1618
1618 copy = {}
1619 copy = {}
1619 if opts.git or opts.upgrade:
1620 if opts.git or opts.upgrade:
1620 copy = copies.pathcopies(ctx1, ctx2)
1621 copy = copies.pathcopies(ctx1, ctx2)
1621
1622
1622 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1623 difffn = lambda opts, losedata: trydiff(repo, revs, ctx1, ctx2,
1623 modified, added, removed, copy, getfilectx, opts, losedata, prefix)
1624 modified, added, removed, copy, getfilectx, opts, losedata, prefix)
1624 if opts.upgrade and not opts.git:
1625 if opts.upgrade and not opts.git:
1625 try:
1626 try:
1626 def losedata(fn):
1627 def losedata(fn):
1627 if not losedatafn or not losedatafn(fn=fn):
1628 if not losedatafn or not losedatafn(fn=fn):
1628 raise GitDiffRequired()
1629 raise GitDiffRequired()
1629 # Buffer the whole output until we are sure it can be generated
1630 # Buffer the whole output until we are sure it can be generated
1630 return list(difffn(opts.copy(git=False), losedata))
1631 return list(difffn(opts.copy(git=False), losedata))
1631 except GitDiffRequired:
1632 except GitDiffRequired:
1632 return difffn(opts.copy(git=True), None)
1633 return difffn(opts.copy(git=True), None)
1633 else:
1634 else:
1634 return difffn(opts, None)
1635 return difffn(opts, None)
1635
1636
1636 def difflabel(func, *args, **kw):
1637 def difflabel(func, *args, **kw):
1637 '''yields 2-tuples of (output, label) based on the output of func()'''
1638 '''yields 2-tuples of (output, label) based on the output of func()'''
1638 headprefixes = [('diff', 'diff.diffline'),
1639 headprefixes = [('diff', 'diff.diffline'),
1639 ('copy', 'diff.extended'),
1640 ('copy', 'diff.extended'),
1640 ('rename', 'diff.extended'),
1641 ('rename', 'diff.extended'),
1641 ('old', 'diff.extended'),
1642 ('old', 'diff.extended'),
1642 ('new', 'diff.extended'),
1643 ('new', 'diff.extended'),
1643 ('deleted', 'diff.extended'),
1644 ('deleted', 'diff.extended'),
1644 ('---', 'diff.file_a'),
1645 ('---', 'diff.file_a'),
1645 ('+++', 'diff.file_b')]
1646 ('+++', 'diff.file_b')]
1646 textprefixes = [('@', 'diff.hunk'),
1647 textprefixes = [('@', 'diff.hunk'),
1647 ('-', 'diff.deleted'),
1648 ('-', 'diff.deleted'),
1648 ('+', 'diff.inserted')]
1649 ('+', 'diff.inserted')]
1649 head = False
1650 head = False
1650 for chunk in func(*args, **kw):
1651 for chunk in func(*args, **kw):
1651 lines = chunk.split('\n')
1652 lines = chunk.split('\n')
1652 for i, line in enumerate(lines):
1653 for i, line in enumerate(lines):
1653 if i != 0:
1654 if i != 0:
1654 yield ('\n', '')
1655 yield ('\n', '')
1655 if head:
1656 if head:
1656 if line.startswith('@'):
1657 if line.startswith('@'):
1657 head = False
1658 head = False
1658 else:
1659 else:
1659 if line and not line[0] in ' +-@\\':
1660 if line and not line[0] in ' +-@\\':
1660 head = True
1661 head = True
1661 stripline = line
1662 stripline = line
1662 if not head and line and line[0] in '+-':
1663 if not head and line and line[0] in '+-':
1663 # highlight trailing whitespace, but only in changed lines
1664 # highlight trailing whitespace, but only in changed lines
1664 stripline = line.rstrip()
1665 stripline = line.rstrip()
1665 prefixes = textprefixes
1666 prefixes = textprefixes
1666 if head:
1667 if head:
1667 prefixes = headprefixes
1668 prefixes = headprefixes
1668 for prefix, label in prefixes:
1669 for prefix, label in prefixes:
1669 if stripline.startswith(prefix):
1670 if stripline.startswith(prefix):
1670 yield (stripline, label)
1671 yield (stripline, label)
1671 break
1672 break
1672 else:
1673 else:
1673 yield (line, '')
1674 yield (line, '')
1674 if line != stripline:
1675 if line != stripline:
1675 yield (line[len(stripline):], 'diff.trailingwhitespace')
1676 yield (line[len(stripline):], 'diff.trailingwhitespace')
1676
1677
1677 def diffui(*args, **kw):
1678 def diffui(*args, **kw):
1678 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1679 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1679 return difflabel(diff, *args, **kw)
1680 return difflabel(diff, *args, **kw)
1680
1681
1681
1682
1682 def _addmodehdr(header, omode, nmode):
1683 def _addmodehdr(header, omode, nmode):
1683 if omode != nmode:
1684 if omode != nmode:
1684 header.append('old mode %s\n' % omode)
1685 header.append('old mode %s\n' % omode)
1685 header.append('new mode %s\n' % nmode)
1686 header.append('new mode %s\n' % nmode)
1686
1687
1687 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1688 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1688 copy, getfilectx, opts, losedatafn, prefix):
1689 copy, getfilectx, opts, losedatafn, prefix):
1689
1690
1690 def join(f):
1691 def join(f):
1691 return os.path.join(prefix, f)
1692 return os.path.join(prefix, f)
1692
1693
1693 date1 = util.datestr(ctx1.date())
1694 date1 = util.datestr(ctx1.date())
1694 man1 = ctx1.manifest()
1695 man1 = ctx1.manifest()
1695
1696
1696 gone = set()
1697 gone = set()
1697 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1698 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1698
1699
1699 copyto = dict([(v, k) for k, v in copy.items()])
1700 copyto = dict([(v, k) for k, v in copy.items()])
1700
1701
1701 if opts.git:
1702 if opts.git:
1702 revs = None
1703 revs = None
1703
1704
1704 for f in sorted(modified + added + removed):
1705 for f in sorted(modified + added + removed):
1705 to = None
1706 to = None
1706 tn = None
1707 tn = None
1707 dodiff = True
1708 dodiff = True
1708 header = []
1709 header = []
1709 if f in man1:
1710 if f in man1:
1710 to = getfilectx(f, ctx1).data()
1711 to = getfilectx(f, ctx1).data()
1711 if f not in removed:
1712 if f not in removed:
1712 tn = getfilectx(f, ctx2).data()
1713 tn = getfilectx(f, ctx2).data()
1713 a, b = f, f
1714 a, b = f, f
1714 if opts.git or losedatafn:
1715 if opts.git or losedatafn:
1715 if f in added:
1716 if f in added:
1716 mode = gitmode[ctx2.flags(f)]
1717 mode = gitmode[ctx2.flags(f)]
1717 if f in copy or f in copyto:
1718 if f in copy or f in copyto:
1718 if opts.git:
1719 if opts.git:
1719 if f in copy:
1720 if f in copy:
1720 a = copy[f]
1721 a = copy[f]
1721 else:
1722 else:
1722 a = copyto[f]
1723 a = copyto[f]
1723 omode = gitmode[man1.flags(a)]
1724 omode = gitmode[man1.flags(a)]
1724 _addmodehdr(header, omode, mode)
1725 _addmodehdr(header, omode, mode)
1725 if a in removed and a not in gone:
1726 if a in removed and a not in gone:
1726 op = 'rename'
1727 op = 'rename'
1727 gone.add(a)
1728 gone.add(a)
1728 else:
1729 else:
1729 op = 'copy'
1730 op = 'copy'
1730 header.append('%s from %s\n' % (op, join(a)))
1731 header.append('%s from %s\n' % (op, join(a)))
1731 header.append('%s to %s\n' % (op, join(f)))
1732 header.append('%s to %s\n' % (op, join(f)))
1732 to = getfilectx(a, ctx1).data()
1733 to = getfilectx(a, ctx1).data()
1733 else:
1734 else:
1734 losedatafn(f)
1735 losedatafn(f)
1735 else:
1736 else:
1736 if opts.git:
1737 if opts.git:
1737 header.append('new file mode %s\n' % mode)
1738 header.append('new file mode %s\n' % mode)
1738 elif ctx2.flags(f):
1739 elif ctx2.flags(f):
1739 losedatafn(f)
1740 losedatafn(f)
1740 # In theory, if tn was copied or renamed we should check
1741 # In theory, if tn was copied or renamed we should check
1741 # if the source is binary too but the copy record already
1742 # if the source is binary too but the copy record already
1742 # forces git mode.
1743 # forces git mode.
1743 if util.binary(tn):
1744 if util.binary(tn):
1744 if opts.git:
1745 if opts.git:
1745 dodiff = 'binary'
1746 dodiff = 'binary'
1746 else:
1747 else:
1747 losedatafn(f)
1748 losedatafn(f)
1748 if not opts.git and not tn:
1749 if not opts.git and not tn:
1749 # regular diffs cannot represent new empty file
1750 # regular diffs cannot represent new empty file
1750 losedatafn(f)
1751 losedatafn(f)
1751 elif f in removed:
1752 elif f in removed:
1752 if opts.git:
1753 if opts.git:
1753 # have we already reported a copy above?
1754 # have we already reported a copy above?
1754 if ((f in copy and copy[f] in added
1755 if ((f in copy and copy[f] in added
1755 and copyto[copy[f]] == f) or
1756 and copyto[copy[f]] == f) or
1756 (f in copyto and copyto[f] in added
1757 (f in copyto and copyto[f] in added
1757 and copy[copyto[f]] == f)):
1758 and copy[copyto[f]] == f)):
1758 dodiff = False
1759 dodiff = False
1759 else:
1760 else:
1760 header.append('deleted file mode %s\n' %
1761 header.append('deleted file mode %s\n' %
1761 gitmode[man1.flags(f)])
1762 gitmode[man1.flags(f)])
1762 elif not to or util.binary(to):
1763 elif not to or util.binary(to):
1763 # regular diffs cannot represent empty file deletion
1764 # regular diffs cannot represent empty file deletion
1764 losedatafn(f)
1765 losedatafn(f)
1765 else:
1766 else:
1766 oflag = man1.flags(f)
1767 oflag = man1.flags(f)
1767 nflag = ctx2.flags(f)
1768 nflag = ctx2.flags(f)
1768 binary = util.binary(to) or util.binary(tn)
1769 binary = util.binary(to) or util.binary(tn)
1769 if opts.git:
1770 if opts.git:
1770 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1771 _addmodehdr(header, gitmode[oflag], gitmode[nflag])
1771 if binary:
1772 if binary:
1772 dodiff = 'binary'
1773 dodiff = 'binary'
1773 elif binary or nflag != oflag:
1774 elif binary or nflag != oflag:
1774 losedatafn(f)
1775 losedatafn(f)
1775 if opts.git:
1776 if opts.git:
1776 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1777 header.insert(0, mdiff.diffline(revs, join(a), join(b), opts))
1777
1778
1778 if dodiff:
1779 if dodiff:
1779 if dodiff == 'binary':
1780 if dodiff == 'binary':
1780 text = b85diff(to, tn)
1781 text = b85diff(to, tn)
1781 else:
1782 else:
1782 text = mdiff.unidiff(to, date1,
1783 text = mdiff.unidiff(to, date1,
1783 # ctx2 date may be dynamic
1784 # ctx2 date may be dynamic
1784 tn, util.datestr(ctx2.date()),
1785 tn, util.datestr(ctx2.date()),
1785 join(a), join(b), revs, opts=opts)
1786 join(a), join(b), revs, opts=opts)
1786 if header and (text or len(header) > 1):
1787 if header and (text or len(header) > 1):
1787 yield ''.join(header)
1788 yield ''.join(header)
1788 if text:
1789 if text:
1789 yield text
1790 yield text
1790
1791
1791 def diffstatsum(stats):
1792 def diffstatsum(stats):
1792 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1793 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1793 for f, a, r, b in stats:
1794 for f, a, r, b in stats:
1794 maxfile = max(maxfile, encoding.colwidth(f))
1795 maxfile = max(maxfile, encoding.colwidth(f))
1795 maxtotal = max(maxtotal, a + r)
1796 maxtotal = max(maxtotal, a + r)
1796 addtotal += a
1797 addtotal += a
1797 removetotal += r
1798 removetotal += r
1798 binary = binary or b
1799 binary = binary or b
1799
1800
1800 return maxfile, maxtotal, addtotal, removetotal, binary
1801 return maxfile, maxtotal, addtotal, removetotal, binary
1801
1802
1802 def diffstatdata(lines):
1803 def diffstatdata(lines):
1803 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1804 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1804
1805
1805 results = []
1806 results = []
1806 filename, adds, removes, isbinary = None, 0, 0, False
1807 filename, adds, removes, isbinary = None, 0, 0, False
1807
1808
1808 def addresult():
1809 def addresult():
1809 if filename:
1810 if filename:
1810 results.append((filename, adds, removes, isbinary))
1811 results.append((filename, adds, removes, isbinary))
1811
1812
1812 for line in lines:
1813 for line in lines:
1813 if line.startswith('diff'):
1814 if line.startswith('diff'):
1814 addresult()
1815 addresult()
1815 # set numbers to 0 anyway when starting new file
1816 # set numbers to 0 anyway when starting new file
1816 adds, removes, isbinary = 0, 0, False
1817 adds, removes, isbinary = 0, 0, False
1817 if line.startswith('diff --git'):
1818 if line.startswith('diff --git'):
1818 filename = gitre.search(line).group(1)
1819 filename = gitre.search(line).group(1)
1819 elif line.startswith('diff -r'):
1820 elif line.startswith('diff -r'):
1820 # format: "diff -r ... -r ... filename"
1821 # format: "diff -r ... -r ... filename"
1821 filename = diffre.search(line).group(1)
1822 filename = diffre.search(line).group(1)
1822 elif line.startswith('+') and not line.startswith('+++ '):
1823 elif line.startswith('+') and not line.startswith('+++ '):
1823 adds += 1
1824 adds += 1
1824 elif line.startswith('-') and not line.startswith('--- '):
1825 elif line.startswith('-') and not line.startswith('--- '):
1825 removes += 1
1826 removes += 1
1826 elif (line.startswith('GIT binary patch') or
1827 elif (line.startswith('GIT binary patch') or
1827 line.startswith('Binary file')):
1828 line.startswith('Binary file')):
1828 isbinary = True
1829 isbinary = True
1829 addresult()
1830 addresult()
1830 return results
1831 return results
1831
1832
1832 def diffstat(lines, width=80, git=False):
1833 def diffstat(lines, width=80, git=False):
1833 output = []
1834 output = []
1834 stats = diffstatdata(lines)
1835 stats = diffstatdata(lines)
1835 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1836 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1836
1837
1837 countwidth = len(str(maxtotal))
1838 countwidth = len(str(maxtotal))
1838 if hasbinary and countwidth < 3:
1839 if hasbinary and countwidth < 3:
1839 countwidth = 3
1840 countwidth = 3
1840 graphwidth = width - countwidth - maxname - 6
1841 graphwidth = width - countwidth - maxname - 6
1841 if graphwidth < 10:
1842 if graphwidth < 10:
1842 graphwidth = 10
1843 graphwidth = 10
1843
1844
1844 def scale(i):
1845 def scale(i):
1845 if maxtotal <= graphwidth:
1846 if maxtotal <= graphwidth:
1846 return i
1847 return i
1847 # If diffstat runs out of room it doesn't print anything,
1848 # If diffstat runs out of room it doesn't print anything,
1848 # which isn't very useful, so always print at least one + or -
1849 # which isn't very useful, so always print at least one + or -
1849 # if there were at least some changes.
1850 # if there were at least some changes.
1850 return max(i * graphwidth // maxtotal, int(bool(i)))
1851 return max(i * graphwidth // maxtotal, int(bool(i)))
1851
1852
1852 for filename, adds, removes, isbinary in stats:
1853 for filename, adds, removes, isbinary in stats:
1853 if isbinary:
1854 if isbinary:
1854 count = 'Bin'
1855 count = 'Bin'
1855 else:
1856 else:
1856 count = adds + removes
1857 count = adds + removes
1857 pluses = '+' * scale(adds)
1858 pluses = '+' * scale(adds)
1858 minuses = '-' * scale(removes)
1859 minuses = '-' * scale(removes)
1859 output.append(' %s%s | %*s %s%s\n' %
1860 output.append(' %s%s | %*s %s%s\n' %
1860 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1861 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1861 countwidth, count, pluses, minuses))
1862 countwidth, count, pluses, minuses))
1862
1863
1863 if stats:
1864 if stats:
1864 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1865 output.append(_(' %d files changed, %d insertions(+), %d deletions(-)\n')
1865 % (len(stats), totaladds, totalremoves))
1866 % (len(stats), totaladds, totalremoves))
1866
1867
1867 return ''.join(output)
1868 return ''.join(output)
1868
1869
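A hedged usage sketch over a small, hand-written diff:

lines = ['diff --git a/foo.txt b/foo.txt\n',
         '--- a/foo.txt\n',
         '+++ b/foo.txt\n',
         '@@ -1,1 +1,2 @@\n',
         ' same\n',
         '+added\n']
print diffstat(lines, width=40)
# prints roughly:
#  foo.txt | 1 +
#  1 files changed, 1 insertions(+), 0 deletions(-)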
1869 def diffstatui(*args, **kw):
1870 def diffstatui(*args, **kw):
1870 '''like diffstat(), but yields 2-tuples of (output, label) for
1871 '''like diffstat(), but yields 2-tuples of (output, label) for
1871 ui.write()
1872 ui.write()
1872 '''
1873 '''
1873
1874
1874 for line in diffstat(*args, **kw).splitlines():
1875 for line in diffstat(*args, **kw).splitlines():
1875 if line and line[-1] in '+-':
1876 if line and line[-1] in '+-':
1876 name, graph = line.rsplit(' ', 1)
1877 name, graph = line.rsplit(' ', 1)
1877 yield (name + ' ', '')
1878 yield (name + ' ', '')
1878 m = re.search(r'\++', graph)
1879 m = re.search(r'\++', graph)
1879 if m:
1880 if m:
1880 yield (m.group(0), 'diffstat.inserted')
1881 yield (m.group(0), 'diffstat.inserted')
1881 m = re.search(r'-+', graph)
1882 m = re.search(r'-+', graph)
1882 if m:
1883 if m:
1883 yield (m.group(0), 'diffstat.deleted')
1884 yield (m.group(0), 'diffstat.deleted')
1884 else:
1885 else:
1885 yield (line, '')
1886 yield (line, '')
1886 yield ('\n', '')
1887 yield ('\n', '')