patch: replace functions in fsbackend to use vfs...
Author: Chinmay Joshi
Changeset: r21717:2a095d34 (branch: default)
@@ -1,1928 +1,1928 @@
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email, os, errno, re, posixpath
9 import cStringIO, email, os, errno, re, posixpath
10 import tempfile, zlib, shutil
10 import tempfile, zlib, shutil
11 # On python2.4 you have to import these by name or they fail to
11 # On python2.4 you have to import these by name or they fail to
12 # load. This was not a problem on Python 2.7.
12 # load. This was not a problem on Python 2.7.
13 import email.Generator
13 import email.Generator
14 import email.Parser
14 import email.Parser
15
15
16 from i18n import _
16 from i18n import _
17 from node import hex, short
17 from node import hex, short
18 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
18 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
19
19
20 gitre = re.compile('diff --git a/(.*) b/(.*)')
20 gitre = re.compile('diff --git a/(.*) b/(.*)')
21
21
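A quick standalone check of the gitre pattern defined above (the two groups are the a/ and b/ paths), in plain Python 2:

import re
gitre = re.compile('diff --git a/(.*) b/(.*)')
print gitre.match('diff --git a/dir/old.txt b/dir/new.txt').groups()
# -> ('dir/old.txt', 'dir/new.txt')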
22 class PatchError(Exception):
22 class PatchError(Exception):
23 pass
23 pass
24
24
25
25
26 # public functions
26 # public functions
27
27
28 def split(stream):
28 def split(stream):
29 '''return an iterator of individual patches from a stream'''
29 '''return an iterator of individual patches from a stream'''
30 def isheader(line, inheader):
30 def isheader(line, inheader):
31 if inheader and line[0] in (' ', '\t'):
31 if inheader and line[0] in (' ', '\t'):
32 # continuation
32 # continuation
33 return True
33 return True
34 if line[0] in (' ', '-', '+'):
34 if line[0] in (' ', '-', '+'):
35 # diff line - don't check for header pattern in there
35 # diff line - don't check for header pattern in there
36 return False
36 return False
37 l = line.split(': ', 1)
37 l = line.split(': ', 1)
38 return len(l) == 2 and ' ' not in l[0]
38 return len(l) == 2 and ' ' not in l[0]
39
39
40 def chunk(lines):
40 def chunk(lines):
41 return cStringIO.StringIO(''.join(lines))
41 return cStringIO.StringIO(''.join(lines))
42
42
43 def hgsplit(stream, cur):
43 def hgsplit(stream, cur):
44 inheader = True
44 inheader = True
45
45
46 for line in stream:
46 for line in stream:
47 if not line.strip():
47 if not line.strip():
48 inheader = False
48 inheader = False
49 if not inheader and line.startswith('# HG changeset patch'):
49 if not inheader and line.startswith('# HG changeset patch'):
50 yield chunk(cur)
50 yield chunk(cur)
51 cur = []
51 cur = []
52 inheader = True
52 inheader = True
53
53
54 cur.append(line)
54 cur.append(line)
55
55
56 if cur:
56 if cur:
57 yield chunk(cur)
57 yield chunk(cur)
58
58
59 def mboxsplit(stream, cur):
59 def mboxsplit(stream, cur):
60 for line in stream:
60 for line in stream:
61 if line.startswith('From '):
61 if line.startswith('From '):
62 for c in split(chunk(cur[1:])):
62 for c in split(chunk(cur[1:])):
63 yield c
63 yield c
64 cur = []
64 cur = []
65
65
66 cur.append(line)
66 cur.append(line)
67
67
68 if cur:
68 if cur:
69 for c in split(chunk(cur[1:])):
69 for c in split(chunk(cur[1:])):
70 yield c
70 yield c
71
71
72 def mimesplit(stream, cur):
72 def mimesplit(stream, cur):
73 def msgfp(m):
73 def msgfp(m):
74 fp = cStringIO.StringIO()
74 fp = cStringIO.StringIO()
75 g = email.Generator.Generator(fp, mangle_from_=False)
75 g = email.Generator.Generator(fp, mangle_from_=False)
76 g.flatten(m)
76 g.flatten(m)
77 fp.seek(0)
77 fp.seek(0)
78 return fp
78 return fp
79
79
80 for line in stream:
80 for line in stream:
81 cur.append(line)
81 cur.append(line)
82 c = chunk(cur)
82 c = chunk(cur)
83
83
84 m = email.Parser.Parser().parse(c)
84 m = email.Parser.Parser().parse(c)
85 if not m.is_multipart():
85 if not m.is_multipart():
86 yield msgfp(m)
86 yield msgfp(m)
87 else:
87 else:
88 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
88 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
89 for part in m.walk():
89 for part in m.walk():
90 ct = part.get_content_type()
90 ct = part.get_content_type()
91 if ct not in ok_types:
91 if ct not in ok_types:
92 continue
92 continue
93 yield msgfp(part)
93 yield msgfp(part)
94
94
95 def headersplit(stream, cur):
95 def headersplit(stream, cur):
96 inheader = False
96 inheader = False
97
97
98 for line in stream:
98 for line in stream:
99 if not inheader and isheader(line, inheader):
99 if not inheader and isheader(line, inheader):
100 yield chunk(cur)
100 yield chunk(cur)
101 cur = []
101 cur = []
102 inheader = True
102 inheader = True
103 if inheader and not isheader(line, inheader):
103 if inheader and not isheader(line, inheader):
104 inheader = False
104 inheader = False
105
105
106 cur.append(line)
106 cur.append(line)
107
107
108 if cur:
108 if cur:
109 yield chunk(cur)
109 yield chunk(cur)
110
110
111 def remainder(cur):
111 def remainder(cur):
112 yield chunk(cur)
112 yield chunk(cur)
113
113
114 class fiter(object):
114 class fiter(object):
115 def __init__(self, fp):
115 def __init__(self, fp):
116 self.fp = fp
116 self.fp = fp
117
117
118 def __iter__(self):
118 def __iter__(self):
119 return self
119 return self
120
120
121 def next(self):
121 def next(self):
122 l = self.fp.readline()
122 l = self.fp.readline()
123 if not l:
123 if not l:
124 raise StopIteration
124 raise StopIteration
125 return l
125 return l
126
126
127 inheader = False
127 inheader = False
128 cur = []
128 cur = []
129
129
130 mimeheaders = ['content-type']
130 mimeheaders = ['content-type']
131
131
132 if not util.safehasattr(stream, 'next'):
132 if not util.safehasattr(stream, 'next'):
133 # http responses, for example, have readline but not next
133 # http responses, for example, have readline but not next
134 stream = fiter(stream)
134 stream = fiter(stream)
135
135
136 for line in stream:
136 for line in stream:
137 cur.append(line)
137 cur.append(line)
138 if line.startswith('# HG changeset patch'):
138 if line.startswith('# HG changeset patch'):
139 return hgsplit(stream, cur)
139 return hgsplit(stream, cur)
140 elif line.startswith('From '):
140 elif line.startswith('From '):
141 return mboxsplit(stream, cur)
141 return mboxsplit(stream, cur)
142 elif isheader(line, inheader):
142 elif isheader(line, inheader):
143 inheader = True
143 inheader = True
144 if line.split(':', 1)[0].lower() in mimeheaders:
144 if line.split(':', 1)[0].lower() in mimeheaders:
145 # let email parser handle this
145 # let email parser handle this
146 return mimesplit(stream, cur)
146 return mimesplit(stream, cur)
147 elif line.startswith('--- ') and inheader:
147 elif line.startswith('--- ') and inheader:
148 # No evil headers seen by diff start, split by hand
148 # No evil headers seen by diff start, split by hand
149 return headersplit(stream, cur)
149 return headersplit(stream, cur)
150 # Not enough info, keep reading
150 # Not enough info, keep reading
151
151
152 # if we are here, we have a very plain patch
152 # if we are here, we have a very plain patch
153 return remainder(cur)
153 return remainder(cur)
154
154
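Usage sketch for split(), assuming a Python 2 environment where this file is importable as mercurial.patch: two concatenated "hg export" style patches come back as separate chunks, split at each '# HG changeset patch' marker once the header block has ended.

import cStringIO
from mercurial import patch

stream = cStringIO.StringIO(
    '# HG changeset patch\n'
    '# User alice\n'
    '\n'
    'patch one diff here\n'
    '# HG changeset patch\n'
    '# User bob\n'
    '\n'
    'patch two diff here\n')

for i, chunk in enumerate(patch.split(stream)):
    print i, repr(chunk.read())
# chunk 0 holds the first patch, chunk 1 the second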
155 def extract(ui, fileobj):
155 def extract(ui, fileobj):
156 '''extract patch from data read from fileobj.
156 '''extract patch from data read from fileobj.
157
157
158 patch can be a normal patch or contained in an email message.
158 patch can be a normal patch or contained in an email message.
159
159
160 return tuple (filename, message, user, date, branch, node, p1, p2).
160 return tuple (filename, message, user, date, branch, node, p1, p2).
161 Any item in the returned tuple can be None. If filename is None,
161 Any item in the returned tuple can be None. If filename is None,
162 fileobj did not contain a patch. Caller must unlink filename when done.'''
162 fileobj did not contain a patch. Caller must unlink filename when done.'''
163
163
164 # attempt to detect the start of a patch
164 # attempt to detect the start of a patch
165 # (this heuristic is borrowed from quilt)
165 # (this heuristic is borrowed from quilt)
166 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
166 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
167 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
167 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
168 r'---[ \t].*?^\+\+\+[ \t]|'
168 r'---[ \t].*?^\+\+\+[ \t]|'
169 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
169 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
170
170
171 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
171 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
172 tmpfp = os.fdopen(fd, 'w')
172 tmpfp = os.fdopen(fd, 'w')
173 try:
173 try:
174 msg = email.Parser.Parser().parse(fileobj)
174 msg = email.Parser.Parser().parse(fileobj)
175
175
176 subject = msg['Subject']
176 subject = msg['Subject']
177 user = msg['From']
177 user = msg['From']
178 if not subject and not user:
178 if not subject and not user:
179 # Not an email, restore parsed headers if any
179 # Not an email, restore parsed headers if any
180 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
180 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
181
181
182 # should try to parse msg['Date']
182 # should try to parse msg['Date']
183 date = None
183 date = None
184 nodeid = None
184 nodeid = None
185 branch = None
185 branch = None
186 parents = []
186 parents = []
187
187
188 if subject:
188 if subject:
189 if subject.startswith('[PATCH'):
189 if subject.startswith('[PATCH'):
190 pend = subject.find(']')
190 pend = subject.find(']')
191 if pend >= 0:
191 if pend >= 0:
192 subject = subject[pend + 1:].lstrip()
192 subject = subject[pend + 1:].lstrip()
193 subject = re.sub(r'\n[ \t]+', ' ', subject)
193 subject = re.sub(r'\n[ \t]+', ' ', subject)
194 ui.debug('Subject: %s\n' % subject)
194 ui.debug('Subject: %s\n' % subject)
195 if user:
195 if user:
196 ui.debug('From: %s\n' % user)
196 ui.debug('From: %s\n' % user)
197 diffs_seen = 0
197 diffs_seen = 0
198 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
198 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
199 message = ''
199 message = ''
200 for part in msg.walk():
200 for part in msg.walk():
201 content_type = part.get_content_type()
201 content_type = part.get_content_type()
202 ui.debug('Content-Type: %s\n' % content_type)
202 ui.debug('Content-Type: %s\n' % content_type)
203 if content_type not in ok_types:
203 if content_type not in ok_types:
204 continue
204 continue
205 payload = part.get_payload(decode=True)
205 payload = part.get_payload(decode=True)
206 m = diffre.search(payload)
206 m = diffre.search(payload)
207 if m:
207 if m:
208 hgpatch = False
208 hgpatch = False
209 hgpatchheader = False
209 hgpatchheader = False
210 ignoretext = False
210 ignoretext = False
211
211
212 ui.debug('found patch at byte %d\n' % m.start(0))
212 ui.debug('found patch at byte %d\n' % m.start(0))
213 diffs_seen += 1
213 diffs_seen += 1
214 cfp = cStringIO.StringIO()
214 cfp = cStringIO.StringIO()
215 for line in payload[:m.start(0)].splitlines():
215 for line in payload[:m.start(0)].splitlines():
216 if line.startswith('# HG changeset patch') and not hgpatch:
216 if line.startswith('# HG changeset patch') and not hgpatch:
217 ui.debug('patch generated by hg export\n')
217 ui.debug('patch generated by hg export\n')
218 hgpatch = True
218 hgpatch = True
219 hgpatchheader = True
219 hgpatchheader = True
220 # drop earlier commit message content
220 # drop earlier commit message content
221 cfp.seek(0)
221 cfp.seek(0)
222 cfp.truncate()
222 cfp.truncate()
223 subject = None
223 subject = None
224 elif hgpatchheader:
224 elif hgpatchheader:
225 if line.startswith('# User '):
225 if line.startswith('# User '):
226 user = line[7:]
226 user = line[7:]
227 ui.debug('From: %s\n' % user)
227 ui.debug('From: %s\n' % user)
228 elif line.startswith("# Date "):
228 elif line.startswith("# Date "):
229 date = line[7:]
229 date = line[7:]
230 elif line.startswith("# Branch "):
230 elif line.startswith("# Branch "):
231 branch = line[9:]
231 branch = line[9:]
232 elif line.startswith("# Node ID "):
232 elif line.startswith("# Node ID "):
233 nodeid = line[10:]
233 nodeid = line[10:]
234 elif line.startswith("# Parent "):
234 elif line.startswith("# Parent "):
235 parents.append(line[9:].lstrip())
235 parents.append(line[9:].lstrip())
236 elif not line.startswith("# "):
236 elif not line.startswith("# "):
237 hgpatchheader = False
237 hgpatchheader = False
238 elif line == '---':
238 elif line == '---':
239 ignoretext = True
239 ignoretext = True
240 if not hgpatchheader and not ignoretext:
240 if not hgpatchheader and not ignoretext:
241 cfp.write(line)
241 cfp.write(line)
242 cfp.write('\n')
242 cfp.write('\n')
243 message = cfp.getvalue()
243 message = cfp.getvalue()
244 if tmpfp:
244 if tmpfp:
245 tmpfp.write(payload)
245 tmpfp.write(payload)
246 if not payload.endswith('\n'):
246 if not payload.endswith('\n'):
247 tmpfp.write('\n')
247 tmpfp.write('\n')
248 elif not diffs_seen and message and content_type == 'text/plain':
248 elif not diffs_seen and message and content_type == 'text/plain':
249 message += '\n' + payload
249 message += '\n' + payload
250 except: # re-raises
250 except: # re-raises
251 tmpfp.close()
251 tmpfp.close()
252 os.unlink(tmpname)
252 os.unlink(tmpname)
253 raise
253 raise
254
254
255 if subject and not message.startswith(subject):
255 if subject and not message.startswith(subject):
256 message = '%s\n%s' % (subject, message)
256 message = '%s\n%s' % (subject, message)
257 tmpfp.close()
257 tmpfp.close()
258 if not diffs_seen:
258 if not diffs_seen:
259 os.unlink(tmpname)
259 os.unlink(tmpname)
260 return None, message, user, date, branch, None, None, None
260 return None, message, user, date, branch, None, None, None
261 p1 = parents and parents.pop(0) or None
261 p1 = parents and parents.pop(0) or None
262 p2 = parents and parents.pop(0) or None
262 p2 = parents and parents.pop(0) or None
263 return tmpname, message, user, date, branch, nodeid, p1, p2
263 return tmpname, message, user, date, branch, nodeid, p1, p2
264
264
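Usage sketch for extract(): only ui.debug() is called above, so a minimal stand-in object is enough. The input mimics "hg export" output; the caller gets back the parsed user, date and node plus the name of a temporary file holding the diff, which it must unlink. Assumes Python 2 and that this file is importable as mercurial.patch.

import os
import cStringIO
from mercurial import patch

class fakeui(object):
    def debug(self, *args):
        pass

exported = (
    '# HG changeset patch\n'
    '# User alice\n'
    '# Date 0 0\n'
    '# Node ID 1111111111111111111111111111111111111111\n'
    '# Parent  0000000000000000000000000000000000000000\n'
    'change a line\n'
    '\n'
    'diff -r 000000000000 -r 111111111111 a\n'
    '--- a/a\n'
    '+++ b/a\n'
    '@@ -1,1 +1,1 @@\n'
    '-old\n'
    '+new\n')

fname, message, user, date, branch, node, p1, p2 = patch.extract(
    fakeui(), cStringIO.StringIO(exported))
try:
    print user, date, node    # alice 0 0 1111...
    print repr(message)       # 'change a line\n\n'
finally:
    if fname:
        os.unlink(fname)      # caller must clean up the temp file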
265 class patchmeta(object):
265 class patchmeta(object):
266 """Patched file metadata
266 """Patched file metadata
267
267
268 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
268 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
269 or COPY. 'path' is patched file path. 'oldpath' is set to the
269 or COPY. 'path' is patched file path. 'oldpath' is set to the
270 origin file when 'op' is either COPY or RENAME, None otherwise. If
270 origin file when 'op' is either COPY or RENAME, None otherwise. If
271 file mode is changed, 'mode' is a tuple (islink, isexec) where
271 file mode is changed, 'mode' is a tuple (islink, isexec) where
272 'islink' is True if the file is a symlink and 'isexec' is True if
272 'islink' is True if the file is a symlink and 'isexec' is True if
273 the file is executable. Otherwise, 'mode' is None.
273 the file is executable. Otherwise, 'mode' is None.
274 """
274 """
275 def __init__(self, path):
275 def __init__(self, path):
276 self.path = path
276 self.path = path
277 self.oldpath = None
277 self.oldpath = None
278 self.mode = None
278 self.mode = None
279 self.op = 'MODIFY'
279 self.op = 'MODIFY'
280 self.binary = False
280 self.binary = False
281
281
282 def setmode(self, mode):
282 def setmode(self, mode):
283 islink = mode & 020000
283 islink = mode & 020000
284 isexec = mode & 0100
284 isexec = mode & 0100
285 self.mode = (islink, isexec)
285 self.mode = (islink, isexec)
286
286
287 def copy(self):
287 def copy(self):
288 other = patchmeta(self.path)
288 other = patchmeta(self.path)
289 other.oldpath = self.oldpath
289 other.oldpath = self.oldpath
290 other.mode = self.mode
290 other.mode = self.mode
291 other.op = self.op
291 other.op = self.op
292 other.binary = self.binary
292 other.binary = self.binary
293 return other
293 return other
294
294
295 def _ispatchinga(self, afile):
295 def _ispatchinga(self, afile):
296 if afile == '/dev/null':
296 if afile == '/dev/null':
297 return self.op == 'ADD'
297 return self.op == 'ADD'
298 return afile == 'a/' + (self.oldpath or self.path)
298 return afile == 'a/' + (self.oldpath or self.path)
299
299
300 def _ispatchingb(self, bfile):
300 def _ispatchingb(self, bfile):
301 if bfile == '/dev/null':
301 if bfile == '/dev/null':
302 return self.op == 'DELETE'
302 return self.op == 'DELETE'
303 return bfile == 'b/' + self.path
303 return bfile == 'b/' + self.path
304
304
305 def ispatching(self, afile, bfile):
305 def ispatching(self, afile, bfile):
306 return self._ispatchinga(afile) and self._ispatchingb(bfile)
306 return self._ispatchinga(afile) and self._ispatchingb(bfile)
307
307
308 def __repr__(self):
308 def __repr__(self):
309 return "<patchmeta %s %r>" % (self.op, self.path)
309 return "<patchmeta %s %r>" % (self.op, self.path)
310
310
311 def readgitpatch(lr):
311 def readgitpatch(lr):
312 """extract git-style metadata about patches from <patchname>"""
312 """extract git-style metadata about patches from <patchname>"""
313
313
314 # Filter patch for git information
314 # Filter patch for git information
315 gp = None
315 gp = None
316 gitpatches = []
316 gitpatches = []
317 for line in lr:
317 for line in lr:
318 line = line.rstrip(' \r\n')
318 line = line.rstrip(' \r\n')
319 if line.startswith('diff --git a/'):
319 if line.startswith('diff --git a/'):
320 m = gitre.match(line)
320 m = gitre.match(line)
321 if m:
321 if m:
322 if gp:
322 if gp:
323 gitpatches.append(gp)
323 gitpatches.append(gp)
324 dst = m.group(2)
324 dst = m.group(2)
325 gp = patchmeta(dst)
325 gp = patchmeta(dst)
326 elif gp:
326 elif gp:
327 if line.startswith('--- '):
327 if line.startswith('--- '):
328 gitpatches.append(gp)
328 gitpatches.append(gp)
329 gp = None
329 gp = None
330 continue
330 continue
331 if line.startswith('rename from '):
331 if line.startswith('rename from '):
332 gp.op = 'RENAME'
332 gp.op = 'RENAME'
333 gp.oldpath = line[12:]
333 gp.oldpath = line[12:]
334 elif line.startswith('rename to '):
334 elif line.startswith('rename to '):
335 gp.path = line[10:]
335 gp.path = line[10:]
336 elif line.startswith('copy from '):
336 elif line.startswith('copy from '):
337 gp.op = 'COPY'
337 gp.op = 'COPY'
338 gp.oldpath = line[10:]
338 gp.oldpath = line[10:]
339 elif line.startswith('copy to '):
339 elif line.startswith('copy to '):
340 gp.path = line[8:]
340 gp.path = line[8:]
341 elif line.startswith('deleted file'):
341 elif line.startswith('deleted file'):
342 gp.op = 'DELETE'
342 gp.op = 'DELETE'
343 elif line.startswith('new file mode '):
343 elif line.startswith('new file mode '):
344 gp.op = 'ADD'
344 gp.op = 'ADD'
345 gp.setmode(int(line[-6:], 8))
345 gp.setmode(int(line[-6:], 8))
346 elif line.startswith('new mode '):
346 elif line.startswith('new mode '):
347 gp.setmode(int(line[-6:], 8))
347 gp.setmode(int(line[-6:], 8))
348 elif line.startswith('GIT binary patch'):
348 elif line.startswith('GIT binary patch'):
349 gp.binary = True
349 gp.binary = True
350 if gp:
350 if gp:
351 gitpatches.append(gp)
351 gitpatches.append(gp)
352
352
353 return gitpatches
353 return gitpatches
354
354
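readgitpatch() above only iterates over its argument, so in a quick sketch a plain list of lines can stand in for a linereader (Python 2, mercurial.patch importable). A rename plus a deletion come back as two patchmeta entries:

from mercurial import patch

lines = [
    'diff --git a/old.txt b/new.txt\n',
    'rename from old.txt\n',
    'rename to new.txt\n',
    'diff --git a/gone.txt b/gone.txt\n',
    'deleted file mode 100644\n',
]
for gp in patch.readgitpatch(lines):
    print gp.op, gp.path, gp.oldpath
# RENAME new.txt old.txt
# DELETE gone.txt None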
355 class linereader(object):
355 class linereader(object):
356 # simple class to allow pushing lines back into the input stream
356 # simple class to allow pushing lines back into the input stream
357 def __init__(self, fp):
357 def __init__(self, fp):
358 self.fp = fp
358 self.fp = fp
359 self.buf = []
359 self.buf = []
360
360
361 def push(self, line):
361 def push(self, line):
362 if line is not None:
362 if line is not None:
363 self.buf.append(line)
363 self.buf.append(line)
364
364
365 def readline(self):
365 def readline(self):
366 if self.buf:
366 if self.buf:
367 l = self.buf[0]
367 l = self.buf[0]
368 del self.buf[0]
368 del self.buf[0]
369 return l
369 return l
370 return self.fp.readline()
370 return self.fp.readline()
371
371
372 def __iter__(self):
372 def __iter__(self):
373 while True:
373 while True:
374 l = self.readline()
374 l = self.readline()
375 if not l:
375 if not l:
376 break
376 break
377 yield l
377 yield l
378
378
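Small sketch of linereader's push-back behaviour (Python 2, mercurial.patch importable): a pushed line is handed out again by the next readline() before the underlying stream is read any further.

import cStringIO
from mercurial import patch

lr = patch.linereader(cStringIO.StringIO('one\ntwo\n'))
first = lr.readline()   # 'one\n'
lr.push(first)          # put it back on the reader
print [l for l in lr]   # ['one\n', 'two\n']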
379 class abstractbackend(object):
379 class abstractbackend(object):
380 def __init__(self, ui):
380 def __init__(self, ui):
381 self.ui = ui
381 self.ui = ui
382
382
383 def getfile(self, fname):
383 def getfile(self, fname):
384 """Return target file data and flags as a (data, (islink,
384 """Return target file data and flags as a (data, (islink,
385 isexec)) tuple.
385 isexec)) tuple.
386 """
386 """
387 raise NotImplementedError
387 raise NotImplementedError
388
388
389 def setfile(self, fname, data, mode, copysource):
389 def setfile(self, fname, data, mode, copysource):
390 """Write data to target file fname and set its mode. mode is a
390 """Write data to target file fname and set its mode. mode is a
391 (islink, isexec) tuple. If data is None, the file content should
391 (islink, isexec) tuple. If data is None, the file content should
392 be left unchanged. If the file is modified after being copied,
392 be left unchanged. If the file is modified after being copied,
393 copysource is set to the original file name.
393 copysource is set to the original file name.
394 """
394 """
395 raise NotImplementedError
395 raise NotImplementedError
396
396
397 def unlink(self, fname):
397 def unlink(self, fname):
398 """Unlink target file."""
398 """Unlink target file."""
399 raise NotImplementedError
399 raise NotImplementedError
400
400
401 def writerej(self, fname, failed, total, lines):
401 def writerej(self, fname, failed, total, lines):
402 """Write rejected lines for fname. total is the number of hunks
402 """Write rejected lines for fname. total is the number of hunks
403 which failed to apply and total the total number of hunks for this
403 which failed to apply and total the total number of hunks for this
404 files.
404 files.
405 """
405 """
406 pass
406 pass
407
407
408 def exists(self, fname):
408 def exists(self, fname):
409 raise NotImplementedError
409 raise NotImplementedError
410
410
411 class fsbackend(abstractbackend):
412 def __init__(self, ui, basedir):
413 super(fsbackend, self).__init__(ui)
414 self.opener = scmutil.opener(basedir)
415
416 def _join(self, f):
417 return os.path.join(self.opener.base, f)
418
419 def getfile(self, fname):
420 - path = self._join(fname)
421 - if os.path.islink(path):
422 - return (os.readlink(path), (True, False))
420 + if self.opener.islink(fname):
421 + return (self.opener.readlink(fname), (True, False))
422 +
423 isexec = False
424 try:
425 - isexec = os.lstat(path).st_mode & 0100 != 0
425 + isexec = self.opener.lstat(fname).st_mode & 0100 != 0
426 except OSError, e:
427 if e.errno != errno.ENOENT:
428 raise
429 return (self.opener.read(fname), (False, isexec))
430
431 def setfile(self, fname, data, mode, copysource):
432 islink, isexec = mode
433 if data is None:
434 - util.setflags(self._join(fname), islink, isexec)
434 + self.opener.setflags(fname, islink, isexec)
435 return
436 if islink:
437 self.opener.symlink(data, fname)
438 else:
439 self.opener.write(fname, data)
440 if isexec:
441 - util.setflags(self._join(fname), False, True)
441 + self.opener.setflags(fname, False, True)
442
443 def unlink(self, fname):
444 - util.unlinkpath(self._join(fname), ignoremissing=True)
444 + self.opener.unlinkpath(fname, ignoremissing=True)
445
446 def writerej(self, fname, failed, total, lines):
447 fname = fname + ".rej"
448 self.ui.warn(
449 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
450 (failed, total, fname))
451 fp = self.opener(fname, 'w')
452 fp.writelines(lines)
453 fp.close()
454
455 def exists(self, fname):
456 - return os.path.lexists(self._join(fname))
456 + return self.opener.lexists(fname)
457
457
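The hunk above is the point of this changeset: fsbackend no longer builds absolute paths with self._join() and the os/util helpers, it calls the equivalent methods on its opener (vfs) object, which is rooted at basedir. As an illustration only (this is not Mercurial's vfs API, just a stand-in with the few calls getfile() needs), the pattern looks like this in plain Python 2:

import os, errno

class minivfs(object):
    """Toy vfs: every method takes a path relative to self.base."""
    def __init__(self, base):
        self.base = base
    def _join(self, path):
        return os.path.join(self.base, path)
    def islink(self, path):
        return os.path.islink(self._join(path))
    def readlink(self, path):
        return os.readlink(self._join(path))
    def lstat(self, path):
        return os.lstat(self._join(path))
    def read(self, path):
        fp = open(self._join(path), 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

def getfile(vfs, fname):
    # mirrors the new getfile() above: no absolute paths at this level
    if vfs.islink(fname):
        return (vfs.readlink(fname), (True, False))
    isexec = False
    try:
        isexec = vfs.lstat(fname).st_mode & 0100 != 0
    except OSError, e:
        if e.errno != errno.ENOENT:
            raise
    return (vfs.read(fname), (False, isexec))

Keeping all filesystem access behind one opener-style object is the same indirection the filestore class further down uses for its temporary spill directory.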
458 class workingbackend(fsbackend):
458 class workingbackend(fsbackend):
459 def __init__(self, ui, repo, similarity):
459 def __init__(self, ui, repo, similarity):
460 super(workingbackend, self).__init__(ui, repo.root)
460 super(workingbackend, self).__init__(ui, repo.root)
461 self.repo = repo
461 self.repo = repo
462 self.similarity = similarity
462 self.similarity = similarity
463 self.removed = set()
463 self.removed = set()
464 self.changed = set()
464 self.changed = set()
465 self.copied = []
465 self.copied = []
466
466
467 def _checkknown(self, fname):
467 def _checkknown(self, fname):
468 if self.repo.dirstate[fname] == '?' and self.exists(fname):
468 if self.repo.dirstate[fname] == '?' and self.exists(fname):
469 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
469 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
470
470
471 def setfile(self, fname, data, mode, copysource):
471 def setfile(self, fname, data, mode, copysource):
472 self._checkknown(fname)
472 self._checkknown(fname)
473 super(workingbackend, self).setfile(fname, data, mode, copysource)
473 super(workingbackend, self).setfile(fname, data, mode, copysource)
474 if copysource is not None:
474 if copysource is not None:
475 self.copied.append((copysource, fname))
475 self.copied.append((copysource, fname))
476 self.changed.add(fname)
476 self.changed.add(fname)
477
477
478 def unlink(self, fname):
478 def unlink(self, fname):
479 self._checkknown(fname)
479 self._checkknown(fname)
480 super(workingbackend, self).unlink(fname)
480 super(workingbackend, self).unlink(fname)
481 self.removed.add(fname)
481 self.removed.add(fname)
482 self.changed.add(fname)
482 self.changed.add(fname)
483
483
484 def close(self):
484 def close(self):
485 wctx = self.repo[None]
485 wctx = self.repo[None]
486 changed = set(self.changed)
486 changed = set(self.changed)
487 for src, dst in self.copied:
487 for src, dst in self.copied:
488 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
488 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
489 if self.removed:
489 if self.removed:
490 wctx.forget(sorted(self.removed))
490 wctx.forget(sorted(self.removed))
491 for f in self.removed:
491 for f in self.removed:
492 if f not in self.repo.dirstate:
492 if f not in self.repo.dirstate:
493 # File was deleted and no longer belongs to the
493 # File was deleted and no longer belongs to the
494 # dirstate, it was probably marked added then
494 # dirstate, it was probably marked added then
495 # deleted, and should not be considered by
495 # deleted, and should not be considered by
496 # marktouched().
496 # marktouched().
497 changed.discard(f)
497 changed.discard(f)
498 if changed:
498 if changed:
499 scmutil.marktouched(self.repo, changed, self.similarity)
499 scmutil.marktouched(self.repo, changed, self.similarity)
500 return sorted(self.changed)
500 return sorted(self.changed)
501
501
502 class filestore(object):
502 class filestore(object):
503 def __init__(self, maxsize=None):
503 def __init__(self, maxsize=None):
504 self.opener = None
504 self.opener = None
505 self.files = {}
505 self.files = {}
506 self.created = 0
506 self.created = 0
507 self.maxsize = maxsize
507 self.maxsize = maxsize
508 if self.maxsize is None:
508 if self.maxsize is None:
509 self.maxsize = 4*(2**20)
509 self.maxsize = 4*(2**20)
510 self.size = 0
510 self.size = 0
511 self.data = {}
511 self.data = {}
512
512
513 def setfile(self, fname, data, mode, copied=None):
513 def setfile(self, fname, data, mode, copied=None):
514 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
514 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
515 self.data[fname] = (data, mode, copied)
515 self.data[fname] = (data, mode, copied)
516 self.size += len(data)
516 self.size += len(data)
517 else:
517 else:
518 if self.opener is None:
518 if self.opener is None:
519 root = tempfile.mkdtemp(prefix='hg-patch-')
519 root = tempfile.mkdtemp(prefix='hg-patch-')
520 self.opener = scmutil.opener(root)
520 self.opener = scmutil.opener(root)
521 # Avoid filename issues with these simple names
521 # Avoid filename issues with these simple names
522 fn = str(self.created)
522 fn = str(self.created)
523 self.opener.write(fn, data)
523 self.opener.write(fn, data)
524 self.created += 1
524 self.created += 1
525 self.files[fname] = (fn, mode, copied)
525 self.files[fname] = (fn, mode, copied)
526
526
527 def getfile(self, fname):
527 def getfile(self, fname):
528 if fname in self.data:
528 if fname in self.data:
529 return self.data[fname]
529 return self.data[fname]
530 if not self.opener or fname not in self.files:
530 if not self.opener or fname not in self.files:
531 raise IOError
531 raise IOError
532 fn, mode, copied = self.files[fname]
532 fn, mode, copied = self.files[fname]
533 return self.opener.read(fn), mode, copied
533 return self.opener.read(fn), mode, copied
534
534
535 def close(self):
535 def close(self):
536 if self.opener:
536 if self.opener:
537 shutil.rmtree(self.opener.base)
537 shutil.rmtree(self.opener.base)
538
538
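Usage sketch for filestore (Python 2, mercurial.patch importable): small files stay in the in-memory dict, anything that would push the total past maxsize is spilled to a temporary directory through a scmutil.opener, and getfile() returns (data, mode, copied) either way; close() removes the spill directory.

from mercurial import patch

store = patch.filestore(maxsize=16)   # tiny limit to force spilling
store.setfile('small', 'abc', (False, False))
store.setfile('big', 'x' * 64, (False, True))
try:
    print store.getfile('small')      # ('abc', (False, False), None)
    print store.getfile('big')[1]     # (False, True), read back from disk
finally:
    store.close()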
539 class repobackend(abstractbackend):
539 class repobackend(abstractbackend):
540 def __init__(self, ui, repo, ctx, store):
540 def __init__(self, ui, repo, ctx, store):
541 super(repobackend, self).__init__(ui)
541 super(repobackend, self).__init__(ui)
542 self.repo = repo
542 self.repo = repo
543 self.ctx = ctx
543 self.ctx = ctx
544 self.store = store
544 self.store = store
545 self.changed = set()
545 self.changed = set()
546 self.removed = set()
546 self.removed = set()
547 self.copied = {}
547 self.copied = {}
548
548
549 def _checkknown(self, fname):
549 def _checkknown(self, fname):
550 if fname not in self.ctx:
550 if fname not in self.ctx:
551 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
551 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
552
552
553 def getfile(self, fname):
553 def getfile(self, fname):
554 try:
554 try:
555 fctx = self.ctx[fname]
555 fctx = self.ctx[fname]
556 except error.LookupError:
556 except error.LookupError:
557 raise IOError
557 raise IOError
558 flags = fctx.flags()
558 flags = fctx.flags()
559 return fctx.data(), ('l' in flags, 'x' in flags)
559 return fctx.data(), ('l' in flags, 'x' in flags)
560
560
561 def setfile(self, fname, data, mode, copysource):
561 def setfile(self, fname, data, mode, copysource):
562 if copysource:
562 if copysource:
563 self._checkknown(copysource)
563 self._checkknown(copysource)
564 if data is None:
564 if data is None:
565 data = self.ctx[fname].data()
565 data = self.ctx[fname].data()
566 self.store.setfile(fname, data, mode, copysource)
566 self.store.setfile(fname, data, mode, copysource)
567 self.changed.add(fname)
567 self.changed.add(fname)
568 if copysource:
568 if copysource:
569 self.copied[fname] = copysource
569 self.copied[fname] = copysource
570
570
571 def unlink(self, fname):
571 def unlink(self, fname):
572 self._checkknown(fname)
572 self._checkknown(fname)
573 self.removed.add(fname)
573 self.removed.add(fname)
574
574
575 def exists(self, fname):
575 def exists(self, fname):
576 return fname in self.ctx
576 return fname in self.ctx
577
577
578 def close(self):
578 def close(self):
579 return self.changed | self.removed
579 return self.changed | self.removed
580
580
581 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
581 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
582 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
582 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
583 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
583 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
584 eolmodes = ['strict', 'crlf', 'lf', 'auto']
584 eolmodes = ['strict', 'crlf', 'lf', 'auto']
585
585
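The two regexes above carry the hunk geometry; a quick standalone check of unidesc (group order is starta, lena, startb, lenb, with a length of None meaning 1):

import re
unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
print unidesc.match('@@ -12,5 +12,6 @@').groups()   # ('12', '5', '12', '6')
print unidesc.match('@@ -3 +3 @@').groups()          # ('3', None, '3', None)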
586 class patchfile(object):
586 class patchfile(object):
587 def __init__(self, ui, gp, backend, store, eolmode='strict'):
587 def __init__(self, ui, gp, backend, store, eolmode='strict'):
588 self.fname = gp.path
588 self.fname = gp.path
589 self.eolmode = eolmode
589 self.eolmode = eolmode
590 self.eol = None
590 self.eol = None
591 self.backend = backend
591 self.backend = backend
592 self.ui = ui
592 self.ui = ui
593 self.lines = []
593 self.lines = []
594 self.exists = False
594 self.exists = False
595 self.missing = True
595 self.missing = True
596 self.mode = gp.mode
596 self.mode = gp.mode
597 self.copysource = gp.oldpath
597 self.copysource = gp.oldpath
598 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
598 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
599 self.remove = gp.op == 'DELETE'
599 self.remove = gp.op == 'DELETE'
600 try:
600 try:
601 if self.copysource is None:
601 if self.copysource is None:
602 data, mode = backend.getfile(self.fname)
602 data, mode = backend.getfile(self.fname)
603 self.exists = True
603 self.exists = True
604 else:
604 else:
605 data, mode = store.getfile(self.copysource)[:2]
605 data, mode = store.getfile(self.copysource)[:2]
606 self.exists = backend.exists(self.fname)
606 self.exists = backend.exists(self.fname)
607 self.missing = False
607 self.missing = False
608 if data:
608 if data:
609 self.lines = mdiff.splitnewlines(data)
609 self.lines = mdiff.splitnewlines(data)
610 if self.mode is None:
610 if self.mode is None:
611 self.mode = mode
611 self.mode = mode
612 if self.lines:
612 if self.lines:
613 # Normalize line endings
613 # Normalize line endings
614 if self.lines[0].endswith('\r\n'):
614 if self.lines[0].endswith('\r\n'):
615 self.eol = '\r\n'
615 self.eol = '\r\n'
616 elif self.lines[0].endswith('\n'):
616 elif self.lines[0].endswith('\n'):
617 self.eol = '\n'
617 self.eol = '\n'
618 if eolmode != 'strict':
618 if eolmode != 'strict':
619 nlines = []
619 nlines = []
620 for l in self.lines:
620 for l in self.lines:
621 if l.endswith('\r\n'):
621 if l.endswith('\r\n'):
622 l = l[:-2] + '\n'
622 l = l[:-2] + '\n'
623 nlines.append(l)
623 nlines.append(l)
624 self.lines = nlines
624 self.lines = nlines
625 except IOError:
625 except IOError:
626 if self.create:
626 if self.create:
627 self.missing = False
627 self.missing = False
628 if self.mode is None:
628 if self.mode is None:
629 self.mode = (False, False)
629 self.mode = (False, False)
630 if self.missing:
630 if self.missing:
631 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
631 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
632
632
633 self.hash = {}
633 self.hash = {}
634 self.dirty = 0
634 self.dirty = 0
635 self.offset = 0
635 self.offset = 0
636 self.skew = 0
636 self.skew = 0
637 self.rej = []
637 self.rej = []
638 self.fileprinted = False
638 self.fileprinted = False
639 self.printfile(False)
639 self.printfile(False)
640 self.hunks = 0
640 self.hunks = 0
641
641
642 def writelines(self, fname, lines, mode):
642 def writelines(self, fname, lines, mode):
643 if self.eolmode == 'auto':
643 if self.eolmode == 'auto':
644 eol = self.eol
644 eol = self.eol
645 elif self.eolmode == 'crlf':
645 elif self.eolmode == 'crlf':
646 eol = '\r\n'
646 eol = '\r\n'
647 else:
647 else:
648 eol = '\n'
648 eol = '\n'
649
649
650 if self.eolmode != 'strict' and eol and eol != '\n':
650 if self.eolmode != 'strict' and eol and eol != '\n':
651 rawlines = []
651 rawlines = []
652 for l in lines:
652 for l in lines:
653 if l and l[-1] == '\n':
653 if l and l[-1] == '\n':
654 l = l[:-1] + eol
654 l = l[:-1] + eol
655 rawlines.append(l)
655 rawlines.append(l)
656 lines = rawlines
656 lines = rawlines
657
657
658 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
658 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
659
659
660 def printfile(self, warn):
660 def printfile(self, warn):
661 if self.fileprinted:
661 if self.fileprinted:
662 return
662 return
663 if warn or self.ui.verbose:
663 if warn or self.ui.verbose:
664 self.fileprinted = True
664 self.fileprinted = True
665 s = _("patching file %s\n") % self.fname
665 s = _("patching file %s\n") % self.fname
666 if warn:
666 if warn:
667 self.ui.warn(s)
667 self.ui.warn(s)
668 else:
668 else:
669 self.ui.note(s)
669 self.ui.note(s)
670
670
671
671
672 def findlines(self, l, linenum):
672 def findlines(self, l, linenum):
673 # looks through the hash and finds candidate lines. The
673 # looks through the hash and finds candidate lines. The
674 # result is a list of line numbers sorted based on distance
674 # result is a list of line numbers sorted based on distance
675 # from linenum
675 # from linenum
676
676
677 cand = self.hash.get(l, [])
677 cand = self.hash.get(l, [])
678 if len(cand) > 1:
678 if len(cand) > 1:
679 # resort our list of potentials forward then back.
679 # resort our list of potentials forward then back.
680 cand.sort(key=lambda x: abs(x - linenum))
680 cand.sort(key=lambda x: abs(x - linenum))
681 return cand
681 return cand
682
682
683 def write_rej(self):
683 def write_rej(self):
684 # our rejects are a little different from patch(1). This always
684 # our rejects are a little different from patch(1). This always
685 # creates rejects in the same form as the original patch. A file
685 # creates rejects in the same form as the original patch. A file
686 # header is inserted so that you can run the reject through patch again
686 # header is inserted so that you can run the reject through patch again
687 # without having to type the filename.
687 # without having to type the filename.
688 if not self.rej:
688 if not self.rej:
689 return
689 return
690 base = os.path.basename(self.fname)
690 base = os.path.basename(self.fname)
691 lines = ["--- %s\n+++ %s\n" % (base, base)]
691 lines = ["--- %s\n+++ %s\n" % (base, base)]
692 for x in self.rej:
692 for x in self.rej:
693 for l in x.hunk:
693 for l in x.hunk:
694 lines.append(l)
694 lines.append(l)
695 if l[-1] != '\n':
695 if l[-1] != '\n':
696 lines.append("\n\ No newline at end of file\n")
696 lines.append("\n\ No newline at end of file\n")
697 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
697 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
698
698
699 def apply(self, h):
699 def apply(self, h):
700 if not h.complete():
700 if not h.complete():
701 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
701 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
702 (h.number, h.desc, len(h.a), h.lena, len(h.b),
702 (h.number, h.desc, len(h.a), h.lena, len(h.b),
703 h.lenb))
703 h.lenb))
704
704
705 self.hunks += 1
705 self.hunks += 1
706
706
707 if self.missing:
707 if self.missing:
708 self.rej.append(h)
708 self.rej.append(h)
709 return -1
709 return -1
710
710
711 if self.exists and self.create:
711 if self.exists and self.create:
712 if self.copysource:
712 if self.copysource:
713 self.ui.warn(_("cannot create %s: destination already "
713 self.ui.warn(_("cannot create %s: destination already "
714 "exists\n") % self.fname)
714 "exists\n") % self.fname)
715 else:
715 else:
716 self.ui.warn(_("file %s already exists\n") % self.fname)
716 self.ui.warn(_("file %s already exists\n") % self.fname)
717 self.rej.append(h)
717 self.rej.append(h)
718 return -1
718 return -1
719
719
720 if isinstance(h, binhunk):
720 if isinstance(h, binhunk):
721 if self.remove:
721 if self.remove:
722 self.backend.unlink(self.fname)
722 self.backend.unlink(self.fname)
723 else:
723 else:
724 l = h.new(self.lines)
724 l = h.new(self.lines)
725 self.lines[:] = l
725 self.lines[:] = l
726 self.offset += len(l)
726 self.offset += len(l)
727 self.dirty = True
727 self.dirty = True
728 return 0
728 return 0
729
729
730 horig = h
730 horig = h
731 if (self.eolmode in ('crlf', 'lf')
731 if (self.eolmode in ('crlf', 'lf')
732 or self.eolmode == 'auto' and self.eol):
732 or self.eolmode == 'auto' and self.eol):
733 # If new eols are going to be normalized, then normalize
733 # If new eols are going to be normalized, then normalize
734 # hunk data before patching. Otherwise, preserve input
734 # hunk data before patching. Otherwise, preserve input
735 # line-endings.
735 # line-endings.
736 h = h.getnormalized()
736 h = h.getnormalized()
737
737
738 # fast case first, no offsets, no fuzz
738 # fast case first, no offsets, no fuzz
739 old, oldstart, new, newstart = h.fuzzit(0, False)
739 old, oldstart, new, newstart = h.fuzzit(0, False)
740 oldstart += self.offset
740 oldstart += self.offset
741 orig_start = oldstart
741 orig_start = oldstart
742 # if there's skew we want to emit the "(offset %d lines)" even
742 # if there's skew we want to emit the "(offset %d lines)" even
743 # when the hunk cleanly applies at start + skew, so skip the
743 # when the hunk cleanly applies at start + skew, so skip the
744 # fast case code
744 # fast case code
745 if (self.skew == 0 and
745 if (self.skew == 0 and
746 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
746 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
747 if self.remove:
747 if self.remove:
748 self.backend.unlink(self.fname)
748 self.backend.unlink(self.fname)
749 else:
749 else:
750 self.lines[oldstart:oldstart + len(old)] = new
750 self.lines[oldstart:oldstart + len(old)] = new
751 self.offset += len(new) - len(old)
751 self.offset += len(new) - len(old)
752 self.dirty = True
752 self.dirty = True
753 return 0
753 return 0
754
754
755 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
755 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
756 self.hash = {}
756 self.hash = {}
757 for x, s in enumerate(self.lines):
757 for x, s in enumerate(self.lines):
758 self.hash.setdefault(s, []).append(x)
758 self.hash.setdefault(s, []).append(x)
759
759
760 for fuzzlen in xrange(3):
760 for fuzzlen in xrange(3):
761 for toponly in [True, False]:
761 for toponly in [True, False]:
762 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
762 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
763 oldstart = oldstart + self.offset + self.skew
763 oldstart = oldstart + self.offset + self.skew
764 oldstart = min(oldstart, len(self.lines))
764 oldstart = min(oldstart, len(self.lines))
765 if old:
765 if old:
766 cand = self.findlines(old[0][1:], oldstart)
766 cand = self.findlines(old[0][1:], oldstart)
767 else:
767 else:
768 # Only adding lines with no or fuzzed context, just
768 # Only adding lines with no or fuzzed context, just
769 # take the skew in account
769 # take the skew in account
770 cand = [oldstart]
770 cand = [oldstart]
771
771
772 for l in cand:
772 for l in cand:
773 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
773 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
774 self.lines[l : l + len(old)] = new
774 self.lines[l : l + len(old)] = new
775 self.offset += len(new) - len(old)
775 self.offset += len(new) - len(old)
776 self.skew = l - orig_start
776 self.skew = l - orig_start
777 self.dirty = True
777 self.dirty = True
778 offset = l - orig_start - fuzzlen
778 offset = l - orig_start - fuzzlen
779 if fuzzlen:
779 if fuzzlen:
780 msg = _("Hunk #%d succeeded at %d "
780 msg = _("Hunk #%d succeeded at %d "
781 "with fuzz %d "
781 "with fuzz %d "
782 "(offset %d lines).\n")
782 "(offset %d lines).\n")
783 self.printfile(True)
783 self.printfile(True)
784 self.ui.warn(msg %
784 self.ui.warn(msg %
785 (h.number, l + 1, fuzzlen, offset))
785 (h.number, l + 1, fuzzlen, offset))
786 else:
786 else:
787 msg = _("Hunk #%d succeeded at %d "
787 msg = _("Hunk #%d succeeded at %d "
788 "(offset %d lines).\n")
788 "(offset %d lines).\n")
789 self.ui.note(msg % (h.number, l + 1, offset))
789 self.ui.note(msg % (h.number, l + 1, offset))
790 return fuzzlen
790 return fuzzlen
791 self.printfile(True)
791 self.printfile(True)
792 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
792 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
793 self.rej.append(horig)
793 self.rej.append(horig)
794 return -1
794 return -1
795
795
796 def close(self):
796 def close(self):
797 if self.dirty:
797 if self.dirty:
798 self.writelines(self.fname, self.lines, self.mode)
798 self.writelines(self.fname, self.lines, self.mode)
799 self.write_rej()
799 self.write_rej()
800 return len(self.rej)
800 return len(self.rej)
801
801
802 class hunk(object):
802 class hunk(object):
803 def __init__(self, desc, num, lr, context):
803 def __init__(self, desc, num, lr, context):
804 self.number = num
804 self.number = num
805 self.desc = desc
805 self.desc = desc
806 self.hunk = [desc]
806 self.hunk = [desc]
807 self.a = []
807 self.a = []
808 self.b = []
808 self.b = []
809 self.starta = self.lena = None
809 self.starta = self.lena = None
810 self.startb = self.lenb = None
810 self.startb = self.lenb = None
811 if lr is not None:
811 if lr is not None:
812 if context:
812 if context:
813 self.read_context_hunk(lr)
813 self.read_context_hunk(lr)
814 else:
814 else:
815 self.read_unified_hunk(lr)
815 self.read_unified_hunk(lr)
816
816
817 def getnormalized(self):
817 def getnormalized(self):
818 """Return a copy with line endings normalized to LF."""
818 """Return a copy with line endings normalized to LF."""
819
819
820 def normalize(lines):
820 def normalize(lines):
821 nlines = []
821 nlines = []
822 for line in lines:
822 for line in lines:
823 if line.endswith('\r\n'):
823 if line.endswith('\r\n'):
824 line = line[:-2] + '\n'
824 line = line[:-2] + '\n'
825 nlines.append(line)
825 nlines.append(line)
826 return nlines
826 return nlines
827
827
828 # Dummy object, it is rebuilt manually
828 # Dummy object, it is rebuilt manually
829 nh = hunk(self.desc, self.number, None, None)
829 nh = hunk(self.desc, self.number, None, None)
830 nh.number = self.number
830 nh.number = self.number
831 nh.desc = self.desc
831 nh.desc = self.desc
832 nh.hunk = self.hunk
832 nh.hunk = self.hunk
833 nh.a = normalize(self.a)
833 nh.a = normalize(self.a)
834 nh.b = normalize(self.b)
834 nh.b = normalize(self.b)
835 nh.starta = self.starta
835 nh.starta = self.starta
836 nh.startb = self.startb
836 nh.startb = self.startb
837 nh.lena = self.lena
837 nh.lena = self.lena
838 nh.lenb = self.lenb
838 nh.lenb = self.lenb
839 return nh
839 return nh
840
840
841 def read_unified_hunk(self, lr):
841 def read_unified_hunk(self, lr):
842 m = unidesc.match(self.desc)
842 m = unidesc.match(self.desc)
843 if not m:
843 if not m:
844 raise PatchError(_("bad hunk #%d") % self.number)
844 raise PatchError(_("bad hunk #%d") % self.number)
845 self.starta, self.lena, self.startb, self.lenb = m.groups()
845 self.starta, self.lena, self.startb, self.lenb = m.groups()
846 if self.lena is None:
846 if self.lena is None:
847 self.lena = 1
847 self.lena = 1
848 else:
848 else:
849 self.lena = int(self.lena)
849 self.lena = int(self.lena)
850 if self.lenb is None:
850 if self.lenb is None:
851 self.lenb = 1
851 self.lenb = 1
852 else:
852 else:
853 self.lenb = int(self.lenb)
853 self.lenb = int(self.lenb)
854 self.starta = int(self.starta)
854 self.starta = int(self.starta)
855 self.startb = int(self.startb)
855 self.startb = int(self.startb)
856 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
856 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
857 self.b)
857 self.b)
858 # if we hit eof before finishing out the hunk, the last line will
858 # if we hit eof before finishing out the hunk, the last line will
859 # be zero length. Lets try to fix it up.
859 # be zero length. Lets try to fix it up.
860 while len(self.hunk[-1]) == 0:
860 while len(self.hunk[-1]) == 0:
861 del self.hunk[-1]
861 del self.hunk[-1]
862 del self.a[-1]
862 del self.a[-1]
863 del self.b[-1]
863 del self.b[-1]
864 self.lena -= 1
864 self.lena -= 1
865 self.lenb -= 1
865 self.lenb -= 1
866 self._fixnewline(lr)
866 self._fixnewline(lr)
867
867
868 def read_context_hunk(self, lr):
868 def read_context_hunk(self, lr):
869 self.desc = lr.readline()
869 self.desc = lr.readline()
870 m = contextdesc.match(self.desc)
870 m = contextdesc.match(self.desc)
871 if not m:
871 if not m:
872 raise PatchError(_("bad hunk #%d") % self.number)
872 raise PatchError(_("bad hunk #%d") % self.number)
873 self.starta, aend = m.groups()
873 self.starta, aend = m.groups()
874 self.starta = int(self.starta)
874 self.starta = int(self.starta)
875 if aend is None:
875 if aend is None:
876 aend = self.starta
876 aend = self.starta
877 self.lena = int(aend) - self.starta
877 self.lena = int(aend) - self.starta
878 if self.starta:
878 if self.starta:
879 self.lena += 1
879 self.lena += 1
880 for x in xrange(self.lena):
880 for x in xrange(self.lena):
881 l = lr.readline()
881 l = lr.readline()
882 if l.startswith('---'):
882 if l.startswith('---'):
883 # lines addition, old block is empty
883 # lines addition, old block is empty
884 lr.push(l)
884 lr.push(l)
885 break
885 break
886 s = l[2:]
886 s = l[2:]
887 if l.startswith('- ') or l.startswith('! '):
887 if l.startswith('- ') or l.startswith('! '):
888 u = '-' + s
888 u = '-' + s
889 elif l.startswith(' '):
889 elif l.startswith(' '):
890 u = ' ' + s
890 u = ' ' + s
891 else:
891 else:
892 raise PatchError(_("bad hunk #%d old text line %d") %
892 raise PatchError(_("bad hunk #%d old text line %d") %
893 (self.number, x))
893 (self.number, x))
894 self.a.append(u)
894 self.a.append(u)
895 self.hunk.append(u)
895 self.hunk.append(u)
896
896
897 l = lr.readline()
897 l = lr.readline()
898 if l.startswith('\ '):
898 if l.startswith('\ '):
899 s = self.a[-1][:-1]
899 s = self.a[-1][:-1]
900 self.a[-1] = s
900 self.a[-1] = s
901 self.hunk[-1] = s
901 self.hunk[-1] = s
902 l = lr.readline()
902 l = lr.readline()
903 m = contextdesc.match(l)
903 m = contextdesc.match(l)
904 if not m:
904 if not m:
905 raise PatchError(_("bad hunk #%d") % self.number)
905 raise PatchError(_("bad hunk #%d") % self.number)
906 self.startb, bend = m.groups()
906 self.startb, bend = m.groups()
907 self.startb = int(self.startb)
907 self.startb = int(self.startb)
908 if bend is None:
908 if bend is None:
909 bend = self.startb
909 bend = self.startb
910 self.lenb = int(bend) - self.startb
910 self.lenb = int(bend) - self.startb
911 if self.startb:
911 if self.startb:
912 self.lenb += 1
912 self.lenb += 1
913 hunki = 1
913 hunki = 1
914 for x in xrange(self.lenb):
914 for x in xrange(self.lenb):
915 l = lr.readline()
915 l = lr.readline()
916 if l.startswith('\ '):
916 if l.startswith('\ '):
917 # XXX: the only way to hit this is with an invalid line range.
917 # XXX: the only way to hit this is with an invalid line range.
918 # The no-eol marker is not counted in the line range, but I
918 # The no-eol marker is not counted in the line range, but I
919 # guess there are diff(1) out there which behave differently.
919 # guess there are diff(1) out there which behave differently.
920 s = self.b[-1][:-1]
920 s = self.b[-1][:-1]
921 self.b[-1] = s
921 self.b[-1] = s
922 self.hunk[hunki - 1] = s
922 self.hunk[hunki - 1] = s
923 continue
923 continue
924 if not l:
924 if not l:
925 # line deletions, new block is empty and we hit EOF
925 # line deletions, new block is empty and we hit EOF
926 lr.push(l)
926 lr.push(l)
927 break
927 break
928 s = l[2:]
928 s = l[2:]
929 if l.startswith('+ ') or l.startswith('! '):
929 if l.startswith('+ ') or l.startswith('! '):
930 u = '+' + s
930 u = '+' + s
931 elif l.startswith(' '):
931 elif l.startswith(' '):
932 u = ' ' + s
932 u = ' ' + s
933 elif len(self.b) == 0:
933 elif len(self.b) == 0:
934 # line deletions, new block is empty
934 # line deletions, new block is empty
935 lr.push(l)
935 lr.push(l)
936 break
936 break
937 else:
937 else:
938 raise PatchError(_("bad hunk #%d old text line %d") %
938 raise PatchError(_("bad hunk #%d old text line %d") %
939 (self.number, x))
939 (self.number, x))
940 self.b.append(s)
940 self.b.append(s)
941 while True:
941 while True:
942 if hunki >= len(self.hunk):
942 if hunki >= len(self.hunk):
943 h = ""
943 h = ""
944 else:
944 else:
945 h = self.hunk[hunki]
945 h = self.hunk[hunki]
946 hunki += 1
946 hunki += 1
947 if h == u:
947 if h == u:
948 break
948 break
949 elif h.startswith('-'):
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart

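# Illustration (not part of patch.py): with fuzz=1 and one context line at
# each end of a hunk, _fuzzit() above returns old[1:-1], new[1:-1] and
# top=1, and fuzzit() then advances the recorded start lines by that same
# amount. The slicing itself is just:
#
#     >>> old = [' ctx before\n', '-removed\n', ' ctx after\n']
#     >>> top = bot = 1
#     >>> old[top:len(old) - bot]
#     ['-removed\n']
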
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        self.text = None
        self.delta = False
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        return self.text is not None

    def new(self, lines):
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        def getline(lr, hunk):
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(base85.b85decode(line[1:])[:l])
            except ValueError, e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text

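# Note (not part of patch.py): in git's binary-patch encoding each base85
# line starts with a length character mapping 'A'-'Z' to 1-26 and 'a'-'z'
# to 27-52 decoded bytes, which is what the arithmetic in _read() computes:
#
#     >>> def linelen(c):
#     ...     if 'A' <= c <= 'Z':
#     ...         return ord(c) - ord('A') + 1
#     ...     return ord(c) - ord('a') + 27
#     >>> linelen('A'), linelen('Z'), linelen('a'), linelen('z')
#     (1, 26, 27, 52)
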
def parsefilename(str):
    # --- filename \t|space stuff
    s = str[4:].rstrip('\r\n')
    i = s.find('\t')
    if i < 0:
        i = s.find(' ')
        if i < 0:
            return s
    return s[:i]

def pathstrip(path, strip):
    pathlen = len(path)
    i = 0
    if strip == 0:
        return '', path.rstrip()
    count = strip
    while count > 0:
        i = path.find('/', i)
        if i == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (count, strip, path))
        i += 1
        # consume '//' in the path
        while i < pathlen - 1 and path[i] == '/':
            i += 1
        count -= 1
    return path[:i].lstrip(), path[i:].rstrip()

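# Worked examples (not part of patch.py) of the two helpers above:
#
#     >>> parsefilename('--- a/foo.c\t2014-05-01 12:00:00\n')
#     'a/foo.c'
#     >>> pathstrip('a/b/c/file.c', 1)
#     ('a/', 'b/c/file.c')
#     >>> pathstrip('a/b/c/file.c', 0)
#     ('', 'a/b/c/file.c')
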
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathstrip(afile_orig, strip)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathstrip(bfile_orig, strip)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            fname = isbackup and afile or bfile
        elif gooda:
            fname = afile

    if not fname:
        if not nullb:
            fname = isbackup and afile or bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp

def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    fp.seek(pos)
    return gitpatches

def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())

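# Typical consumption of the event stream (a sketch, not part of patch.py,
# mirroring _applydiff() and changedfiles() below; 'some.patch' is a
# hypothetical file name):
#
#     fp = open('some.patch', 'rb')
#     try:
#         for state, values in iterhunks(fp):
#             if state == 'file':
#                 afile, bfile, firsthunk, gp = values
#             elif state == 'hunk':
#                 pass  # apply values to the currently selected file
#             elif state == 'git':
#                 pass  # values lists COPY/RENAME patchmeta records
#     finally:
#         fp.close()
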
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c
    """
    def deltahead(binchunk):
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    out = ""
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i]) << 16
                i += 1
            if size == 0:
                size = 0x10000
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return out

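# Worked example (not part of patch.py) of the git delta format handled
# above: two size varints, then either literal-insert opcodes (low 7 bits =
# byte count) or copy opcodes (high bit set, followed by offset/size bytes):
#
#     >>> applybindelta('\x05\x05\x05hello', 'world')    # insert 5 literal bytes
#     'hello'
#     >>> applybindelta('\x05\x05\x91\x00\x05', 'world') # copy 5 bytes from offset 0
#     'world'
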
def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      eolmode=eolmode)

def _applydiff(ui, fp, patcher, backend, store, strip=1,
               eolmode='strict'):

    def pstrip(p):
        return pathstrip(p, strip - 1)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError, inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            for gp in values:
                path = pstrip(gp.oldpath)
                try:
                    data, mode = backend.getfile(path)
                except IOError, e:
                    if e.errno != errno.ENOENT:
                        raise
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                else:
                    store.setfile(path, data, mode)
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err

def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz

def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0

def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
                  similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    backend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, backend, patchobj, strip, files, eolmode)

def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
              eolmode='strict'):
    backend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, backend, patchobj, strip, files, eolmode)

def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = set()
    if patcher:
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, files, eolmode,
                         similarity)

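# Usage sketch (not part of patch.py): apply a patch file from an extension
# or other internal caller; 'ui' and 'repo' are assumed to be live objects
# and 'fix.patch' is a hypothetical path.
#
#     files = set()
#     fuzz = patch(ui, repo, 'fix.patch', strip=1, files=files, eolmode=None)
#     if fuzz:
#         ui.warn('patch applied with fuzz\n')
#     ui.status('touched %d files\n' % len(files))
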
def changedfiles(ui, repo, patchpath, strip=1):
    backend = fsbackend(ui, repo.root)
    fp = open(patchpath, 'rb')
    try:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pathstrip(gp.path, strip - 1)[1]
                    if gp.oldpath:
                        gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise util.Abort(_('unsupported parser state: %s') % state)
        return changed
    finally:
        fp.close()

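# Usage sketch (not part of patch.py): list the files a patch would touch
# without applying it; 'series.patch' is a hypothetical path.
#
#     for f in sorted(changedfiles(ui, repo, 'series.patch', strip=1)):
#         ui.write('%s\n' % f)
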
class GitDiffRequired(Exception):
    pass

def diffopts(ui, opts=None, untrusted=False, section='diff'):
    def get(key, name=None, getter=ui.configbool):
        return ((opts and opts.get(key)) or
                getter(section, name or key, None, untrusted=untrusted))
    return mdiff.diffopts(
        text=opts and opts.get('text'),
        git=get('git'),
        nodates=get('nodates'),
        showfunc=get('show_function', 'showfunc'),
        ignorews=get('ignore_all_space', 'ignorews'),
        ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
        ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
        context=get('unified', getter=ui.config))

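# Usage sketch (not part of patch.py): per-call options win over the [diff]
# configuration section when building the mdiff options object.
#
#     dopts = diffopts(ui, {'git': True, 'show_function': True})
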
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).
    '''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        cache = {}
        order = util.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    revs = None
    hexfunc = repo.ui.debugflag and hex or short
    revs = [hexfunc(node) for node in [node1, node2] if node]

    copy = {}
    if opts.git or opts.upgrade:
        copy = copies.pathcopies(ctx1, ctx2)

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)

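# Usage sketch (not part of patch.py): emit a git-style diff of the working
# directory against its first parent; 'ui' and 'repo' are assumed to exist.
#
#     for chunk in diff(repo, opts=diffopts(ui, {'git': True})):
#         ui.write(chunk)
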
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            if not head and line and line[0] in '+-':
                # highlight trailing whitespace, but only in changed lines
                stripline = line.rstrip()
            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    yield (stripline, label)
                    break
            else:
                yield (line, '')
            if line != stripline:
                yield (line[len(stripline):], 'diff.trailingwhitespace')

def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    return difflabel(diff, *args, **kw)

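# Usage sketch (not part of patch.py): same arguments as diff(), but the
# (output, label) pairs can be fed straight to ui.write() for colorizing.
#
#     for chunk, label in diffui(repo, opts=diffopts(ui, {'git': True})):
#         ui.write(chunk, label=label)
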
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix):

    def join(f):
        return posixpath.join(prefix, f)

    def addmodehdr(header, omode, nmode):
        if omode != nmode:
            header.append('old mode %s\n' % omode)
            header.append('new mode %s\n' % nmode)

    def addindexmeta(meta, revs):
        if opts.git:
            i = len(revs)
            if i==2:
                meta.append('index %s..%s\n' % tuple(revs))
            elif i==3:
                meta.append('index %s,%s..%s\n' % tuple(revs))

    def gitindex(text):
        if not text:
            text = ""
        l = len(text)
        s = util.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    def diffline(a, b, revs):
        if opts.git:
            line = 'diff --git a/%s b/%s\n' % (a, b)
        elif not repo.ui.quiet:
            if revs:
                revinfo = ' '.join(["-r %s" % rev for rev in revs])
                line = 'diff %s %s\n' % (revinfo, a)
            else:
                line = 'diff %s\n' % a
        else:
            line = ''
        return line

    date1 = util.datestr(ctx1.date())
    man1 = ctx1.manifest()

    gone = set()
    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    copyto = dict([(v, k) for k, v in copy.items()])

    if opts.git:
        revs = None

    for f in sorted(modified + added + removed):
        to = None
        tn = None
        dodiff = True
        header = []
        if f in man1:
            to = getfilectx(f, ctx1).data()
        if f not in removed:
            tn = getfilectx(f, ctx2).data()
        a, b = f, f
        if opts.git or losedatafn:
            if f in added or (f in modified and to is None):
                mode = gitmode[ctx2.flags(f)]
                if f in copy or f in copyto:
                    if opts.git:
                        if f in copy:
                            a = copy[f]
                        else:
                            a = copyto[f]
                        omode = gitmode[man1.flags(a)]
                        addmodehdr(header, omode, mode)
                        if a in removed and a not in gone:
                            op = 'rename'
                            gone.add(a)
                        else:
                            op = 'copy'
                        header.append('%s from %s\n' % (op, join(a)))
                        header.append('%s to %s\n' % (op, join(f)))
                        to = getfilectx(a, ctx1).data()
                    else:
                        losedatafn(f)
                else:
                    if opts.git:
                        header.append('new file mode %s\n' % mode)
                    elif ctx2.flags(f):
                        losedatafn(f)
                # In theory, if tn was copied or renamed we should check
                # if the source is binary too but the copy record already
                # forces git mode.
                if util.binary(tn):
                    if opts.git:
                        dodiff = 'binary'
                    else:
                        losedatafn(f)
                if not opts.git and not tn:
                    # regular diffs cannot represent new empty file
                    losedatafn(f)
            elif f in removed or (f in modified and tn is None):
                if opts.git:
                    # have we already reported a copy above?
                    if ((f in copy and copy[f] in added
                         and copyto[copy[f]] == f) or
                        (f in copyto and copyto[f] in added
                         and copy[copyto[f]] == f)):
                        dodiff = False
                    else:
                        header.append('deleted file mode %s\n' %
                                      gitmode[man1.flags(f)])
                        if util.binary(to):
                            dodiff = 'binary'
                elif not to or util.binary(to):
                    # regular diffs cannot represent empty file deletion
                    losedatafn(f)
            else:
                oflag = man1.flags(f)
                nflag = ctx2.flags(f)
                binary = util.binary(to) or util.binary(tn)
                if opts.git:
                    addmodehdr(header, gitmode[oflag], gitmode[nflag])
                    if binary:
                        dodiff = 'binary'
                elif binary or nflag != oflag:
                    losedatafn(f)

        if dodiff:
            if opts.git or revs:
                header.insert(0, diffline(join(a), join(b), revs))
            if dodiff == 'binary':
                text = mdiff.b85diff(to, tn)
                if text:
                    addindexmeta(header, [gitindex(to), gitindex(tn)])
            else:
                text = mdiff.unidiff(to, date1,
                                     # ctx2 date may be dynamic
                                     tn, util.datestr(ctx2.date()),
                                     join(a), join(b), opts=opts)
            if header and (text or len(header) > 1):
                yield ''.join(header)
            if text:
                yield text

def diffstatsum(stats):
    maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
    for f, a, r, b in stats:
        maxfile = max(maxfile, encoding.colwidth(f))
        maxtotal = max(maxtotal, a + r)
        addtotal += a
        removetotal += r
        binary = binary or b

    return maxfile, maxtotal, addtotal, removetotal, binary

def diffstatdata(lines):
    diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        if filename:
            results.append((filename, adds, removes, isbinary))

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # set numbers to 0 anyway when starting new file
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++ '):
            adds += 1
        elif line.startswith('-') and not line.startswith('--- '):
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results

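# A minimal illustration (not part of patch.py) of the per-file tuples
# diffstatdata() produces; diffstat() and diffstatui() below render them.
#
#     >>> lines = ['diff -r 000000000000 -r 111111111111 foo.txt',
#     ...          '--- a/foo.txt',
#     ...          '+++ b/foo.txt',
#     ...          '@@ -1,1 +1,2 @@',
#     ...          ' unchanged',
#     ...          '+added']
#     >>> diffstatdata(lines)
#     [('foo.txt', 1, 0, False)]
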
def diffstat(lines, width=80, git=False):
    output = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        if isbinary:
            count = 'Bin'
        else:
            count = adds + removes
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - encoding.colwidth(filename)),
                       countwidth, count, pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)

def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            m = re.search(r'\++', graph)
            if m:
                yield (m.group(0), 'diffstat.inserted')
            m = re.search(r'-+', graph)
            if m:
                yield (m.group(0), 'diffstat.deleted')
        else:
            yield (line, '')
        yield ('\n', '')