patch.trydiff: add support for stripping a relative root...
Siddharth Agarwal
r24416:f07047a5 default
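The changeset summary above describes the feature being added: the diff machinery (patch.trydiff, which lies outside the excerpt shown below) learns to strip a relative root from the file paths it reports, so a diff can be expressed relative to a subdirectory. As a minimal, hypothetical sketch of the idea only (the helper name and behavior below are assumptions for illustration, not code from this changeset):

def striprelroot(path, relroot):
    # Illustration only: report 'path' relative to 'relroot', so that
    # 'sub/dir/file.py' with relroot 'sub/' is emitted as 'dir/file.py'.
    if relroot and path.startswith(relroot):
        return path[len(relroot):]
    return path

# A header such as 'diff --git a/sub/dir/file.py b/sub/dir/file.py' would
# then be rendered as 'diff --git a/dir/file.py b/dir/file.py'.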
@@ -1,2427 +1,2439 @@
# patch.py - patch file parsing routines
#
# Copyright 2006 Brendan Cully <brendan@kublai.com>
# Copyright 2007 Chris Mason <chris.mason@oracle.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import cStringIO, email, os, errno, re, posixpath, copy
import tempfile, zlib, shutil
# On python2.4 you have to import these by name or they fail to
# load. This was not a problem on Python 2.7.
import email.Generator
import email.Parser

from i18n import _
from node import hex, short
import cStringIO
import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
import pathutil

gitre = re.compile('diff --git a/(.*) b/(.*)')
tabsplitter = re.compile(r'(\t+|[^\t]+)')

class PatchError(Exception):
    pass


# public functions

def split(stream):
    '''return an iterator of individual patches from a stream'''
    def isheader(line, inheader):
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        return cStringIO.StringIO(''.join(lines))

    def hgsplit(stream, cur):
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        for line in stream:
            if line.startswith('From '):
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        def msgfp(m):
            fp = cStringIO.StringIO()
            g = email.Generator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = email.Parser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        yield chunk(cur)

    class fiter(object):
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)

def extract(ui, fileobj):
    '''extract patch from data read from fileobj.

    patch can be a normal patch or contained in an email message.

    return tuple (filename, message, user, date, branch, node, p1, p2).
    Any item in the returned tuple can be None. If filename is None,
    fileobj did not contain a patch. Caller must unlink filename when done.'''

    # attempt to detect the start of a patch
    # (this heuristic is borrowed from quilt)
    diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
                        r'retrieving revision [0-9]+(\.[0-9]+)*$|'
                        r'---[ \t].*?^\+\+\+[ \t]|'
                        r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)

    fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
    tmpfp = os.fdopen(fd, 'w')
    try:
        msg = email.Parser.Parser().parse(fileobj)

        subject = msg['Subject']
        user = msg['From']
        if not subject and not user:
            # Not an email, restore parsed headers if any
            subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'

        # should try to parse msg['Date']
        date = None
        nodeid = None
        branch = None
        parents = []

        if subject:
            if subject.startswith('[PATCH'):
                pend = subject.find(']')
                if pend >= 0:
                    subject = subject[pend + 1:].lstrip()
            subject = re.sub(r'\n[ \t]+', ' ', subject)
            ui.debug('Subject: %s\n' % subject)
        if user:
            ui.debug('From: %s\n' % user)
        diffs_seen = 0
        ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
        message = ''
        for part in msg.walk():
            content_type = part.get_content_type()
            ui.debug('Content-Type: %s\n' % content_type)
            if content_type not in ok_types:
                continue
            payload = part.get_payload(decode=True)
            m = diffre.search(payload)
            if m:
                hgpatch = False
                hgpatchheader = False
                ignoretext = False

                ui.debug('found patch at byte %d\n' % m.start(0))
                diffs_seen += 1
                cfp = cStringIO.StringIO()
                for line in payload[:m.start(0)].splitlines():
                    if line.startswith('# HG changeset patch') and not hgpatch:
                        ui.debug('patch generated by hg export\n')
                        hgpatch = True
                        hgpatchheader = True
                        # drop earlier commit message content
                        cfp.seek(0)
                        cfp.truncate()
                        subject = None
                    elif hgpatchheader:
                        if line.startswith('# User '):
                            user = line[7:]
                            ui.debug('From: %s\n' % user)
                        elif line.startswith("# Date "):
                            date = line[7:]
                        elif line.startswith("# Branch "):
                            branch = line[9:]
                        elif line.startswith("# Node ID "):
                            nodeid = line[10:]
                        elif line.startswith("# Parent "):
                            parents.append(line[9:].lstrip())
                        elif not line.startswith("# "):
                            hgpatchheader = False
                    elif line == '---':
                        ignoretext = True
                    if not hgpatchheader and not ignoretext:
                        cfp.write(line)
                        cfp.write('\n')
                message = cfp.getvalue()
                if tmpfp:
                    tmpfp.write(payload)
                    if not payload.endswith('\n'):
                        tmpfp.write('\n')
            elif not diffs_seen and message and content_type == 'text/plain':
                message += '\n' + payload
    except: # re-raises
        tmpfp.close()
        os.unlink(tmpname)
        raise

    if subject and not message.startswith(subject):
        message = '%s\n%s' % (subject, message)
    tmpfp.close()
    if not diffs_seen:
        os.unlink(tmpname)
        return None, message, user, date, branch, None, None, None

    if parents:
        p1 = parents.pop(0)
    else:
        p1 = None

    if parents:
        p2 = parents.pop(0)
    else:
        p2 = None

    return tmpname, message, user, date, branch, nodeid, p1, p2

class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        islink = mode & 020000
        isexec = mode & 0100
        self.mode = (islink, isexec)

    def copy(self):
        other = patchmeta(self.path)
        other.oldpath = self.oldpath
        other.mode = self.mode
        other.op = self.op
        other.binary = self.binary
        return other

    def _ispatchinga(self, afile):
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)

def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches

class linereader(object):
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            l = self.buf[0]
            del self.buf[0]
            return l
        return self.fp.readline()

    def __iter__(self):
        while True:
            l = self.readline()
            if not l:
                break
            yield l

class abstractbackend(object):
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. failed is the number of hunks
        which failed to apply and total the total number of hunks for this
        file.
        """
        pass

    def exists(self, fname):
        raise NotImplementedError

class fsbackend(abstractbackend):
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = scmutil.opener(basedir)

    def _join(self, f):
        return os.path.join(self.opener.base, f)

    def getfile(self, fname):
        if self.opener.islink(fname):
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0100 != 0
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
        if isexec:
            self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)

class workingbackend(fsbackend):
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
        for f in self.removed:
            if f not in self.repo.dirstate:
                # File was deleted and no longer belongs to the
                # dirstate, it was probably marked added then
                # deleted, and should not be considered by
                # marktouched().
                changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)

class filestore(object):
    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        self.created = 0
        self.maxsize = maxsize
        if self.maxsize is None:
            self.maxsize = 4*(2**20)
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = tempfile.mkdtemp(prefix='hg-patch-')
                self.opener = scmutil.opener(root)
            # Avoid filename issues with these simple names
            fn = str(self.created)
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        if self.opener:
            shutil.rmtree(self.opener.base)

class repobackend(abstractbackend):
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        return self.changed | self.removed

# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
eolmodes = ['strict', 'crlf', 'lf', 'auto']

class patchfile(object):
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(3):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)

class header(object):
    """patch header
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|new|deleted|copy|rename) ')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        return util.any(h.startswith('index ') for h in self.header)

    def pretty(self, fp):
        for h in self.header:
            if h.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if h.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum([max(h.added, h.removed) for h in self.hunks])))
                break
            fp.write(h)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        return util.any(self.allhunks_re.match(h) for h in self.header)

    def files(self):
        match = self.diffgit_re.match(self.header[0])
        if match:
            fromfile, tofile = match.groups()
            if fromfile == tofile:
                return [fromfile]
            return [fromfile, tofile]
        else:
            return self.diff_re.match(self.header[0]).groups()

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def special(self):
        return util.any(self.special_re.match(h) for h in self.header)

class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """
    maxcontext = 3

    def __init__(self, header, fromline, toline, proc, before, hunk, after):
        def trimcontext(number, lines):
            delta = len(lines) - self.maxcontext
            if False and delta > 0:
                return number + delta, lines[:self.maxcontext]
            return number, lines

        self.header = header
        self.fromline, self.before = trimcontext(fromline, before)
        self.toline, self.after = trimcontext(toline, after)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False

        return ((v.hunk == self.hunk) and
                (v.proc == self.proc) and
                (self.fromline == v.fromline) and
                (self.header.files() == v.header.files()))

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = len([h for h in hunk if h[0] == '+'])
        rem = len([h for h in hunk if h[0] == '-'])
        return add, rem

    def write(self, fp):
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)

def filterpatch(ui, headers):
    """Interactively filter patch chunks into applied-only chunks"""

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = _('[Ynesfdaq?]'
                      '$$ &Yes, record this change'
                      '$$ &No, skip this change'
                      '$$ &Edit this change manually'
                      '$$ &Skip remaining changes to this file'
                      '$$ Record remaining changes to this &file'
                      '$$ &Done, skip remaining changes and files'
                      '$$ Record &all changes to all remaining files'
                      '$$ &Quit, recording no changes'
                      '$$ &? (display help)')
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, t.lower()))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # http://mercurial.selenic.com/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                        suffix=".diff", text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, "w")
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ui.system("%s \"%s\"" % (editor, patchfn),
                              environ={'HGUSER': ui.username()},
                              onerr=util.Abort, errprefix=_("edit failed"))
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = cStringIO.StringIO()
                    for line in patchfp:
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise util.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {}        # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
1050 continue
1050 continue
1051 seen.add(hdr)
1051 seen.add(hdr)
1052 if skipall is None:
1052 if skipall is None:
1053 h.pretty(ui)
1053 h.pretty(ui)
1054 msg = (_('examine changes to %s?') %
1054 msg = (_('examine changes to %s?') %
1055 _(' and ').join("'%s'" % f for f in h.files()))
1055 _(' and ').join("'%s'" % f for f in h.files()))
1056 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1056 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1057 if not r:
1057 if not r:
1058 continue
1058 continue
1059 applied[h.filename()] = [h]
1059 applied[h.filename()] = [h]
1060 if h.allhunks():
1060 if h.allhunks():
1061 applied[h.filename()] += h.hunks
1061 applied[h.filename()] += h.hunks
1062 continue
1062 continue
1063 for i, chunk in enumerate(h.hunks):
1063 for i, chunk in enumerate(h.hunks):
1064 if skipfile is None and skipall is None:
1064 if skipfile is None and skipall is None:
1065 chunk.pretty(ui)
1065 chunk.pretty(ui)
1066 if total == 1:
1066 if total == 1:
1067 msg = _("record this change to '%s'?") % chunk.filename()
1067 msg = _("record this change to '%s'?") % chunk.filename()
1068 else:
1068 else:
1069 idx = pos - len(h.hunks) + i
1069 idx = pos - len(h.hunks) + i
1070 msg = _("record change %d/%d to '%s'?") % (idx, total,
1070 msg = _("record change %d/%d to '%s'?") % (idx, total,
1071 chunk.filename())
1071 chunk.filename())
1072 r, skipfile, skipall, newpatches = prompt(skipfile,
1072 r, skipfile, skipall, newpatches = prompt(skipfile,
1073 skipall, msg, chunk)
1073 skipall, msg, chunk)
1074 if r:
1074 if r:
1075 if fixoffset:
1075 if fixoffset:
1076 chunk = copy.copy(chunk)
1076 chunk = copy.copy(chunk)
1077 chunk.toline += fixoffset
1077 chunk.toline += fixoffset
1078 applied[chunk.filename()].append(chunk)
1078 applied[chunk.filename()].append(chunk)
1079 elif newpatches is not None:
1079 elif newpatches is not None:
1080 for newpatch in newpatches:
1080 for newpatch in newpatches:
1081 for newhunk in newpatch.hunks:
1081 for newhunk in newpatch.hunks:
1082 if fixoffset:
1082 if fixoffset:
1083 newhunk.toline += fixoffset
1083 newhunk.toline += fixoffset
1084 applied[newhunk.filename()].append(newhunk)
1084 applied[newhunk.filename()].append(newhunk)
1085 else:
1085 else:
1086 fixoffset += chunk.removed - chunk.added
1086 fixoffset += chunk.removed - chunk.added
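# Two notes added for clarity: fixoffset compensates the target line numbers
# of later hunks in the same file for hunks that were skipped above, and the
# return below keeps a file only when something beyond the bare header was
# selected (or when the header itself is special, e.g. a deletion).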
1087 return sum([h for h in applied.itervalues()
1087 return sum([h for h in applied.itervalues()
1088 if h[0].special() or len(h) > 1], [])
1088 if h[0].special() or len(h) > 1], [])
1089 class hunk(object):
1089 class hunk(object):
1090 def __init__(self, desc, num, lr, context):
1090 def __init__(self, desc, num, lr, context):
1091 self.number = num
1091 self.number = num
1092 self.desc = desc
1092 self.desc = desc
1093 self.hunk = [desc]
1093 self.hunk = [desc]
1094 self.a = []
1094 self.a = []
1095 self.b = []
1095 self.b = []
1096 self.starta = self.lena = None
1096 self.starta = self.lena = None
1097 self.startb = self.lenb = None
1097 self.startb = self.lenb = None
1098 if lr is not None:
1098 if lr is not None:
1099 if context:
1099 if context:
1100 self.read_context_hunk(lr)
1100 self.read_context_hunk(lr)
1101 else:
1101 else:
1102 self.read_unified_hunk(lr)
1102 self.read_unified_hunk(lr)
1103
1103
1104 def getnormalized(self):
1104 def getnormalized(self):
1105 """Return a copy with line endings normalized to LF."""
1105 """Return a copy with line endings normalized to LF."""
1106
1106
1107 def normalize(lines):
1107 def normalize(lines):
1108 nlines = []
1108 nlines = []
1109 for line in lines:
1109 for line in lines:
1110 if line.endswith('\r\n'):
1110 if line.endswith('\r\n'):
1111 line = line[:-2] + '\n'
1111 line = line[:-2] + '\n'
1112 nlines.append(line)
1112 nlines.append(line)
1113 return nlines
1113 return nlines
1114
1114
1115 # Dummy object, it is rebuilt manually
1115 # Dummy object, it is rebuilt manually
1116 nh = hunk(self.desc, self.number, None, None)
1116 nh = hunk(self.desc, self.number, None, None)
1117 nh.number = self.number
1117 nh.number = self.number
1118 nh.desc = self.desc
1118 nh.desc = self.desc
1119 nh.hunk = self.hunk
1119 nh.hunk = self.hunk
1120 nh.a = normalize(self.a)
1120 nh.a = normalize(self.a)
1121 nh.b = normalize(self.b)
1121 nh.b = normalize(self.b)
1122 nh.starta = self.starta
1122 nh.starta = self.starta
1123 nh.startb = self.startb
1123 nh.startb = self.startb
1124 nh.lena = self.lena
1124 nh.lena = self.lena
1125 nh.lenb = self.lenb
1125 nh.lenb = self.lenb
1126 return nh
1126 return nh
1127
1127
1128 def read_unified_hunk(self, lr):
1128 def read_unified_hunk(self, lr):
1129 m = unidesc.match(self.desc)
1129 m = unidesc.match(self.desc)
1130 if not m:
1130 if not m:
1131 raise PatchError(_("bad hunk #%d") % self.number)
1131 raise PatchError(_("bad hunk #%d") % self.number)
1132 self.starta, self.lena, self.startb, self.lenb = m.groups()
1132 self.starta, self.lena, self.startb, self.lenb = m.groups()
1133 if self.lena is None:
1133 if self.lena is None:
1134 self.lena = 1
1134 self.lena = 1
1135 else:
1135 else:
1136 self.lena = int(self.lena)
1136 self.lena = int(self.lena)
1137 if self.lenb is None:
1137 if self.lenb is None:
1138 self.lenb = 1
1138 self.lenb = 1
1139 else:
1139 else:
1140 self.lenb = int(self.lenb)
1140 self.lenb = int(self.lenb)
1141 self.starta = int(self.starta)
1141 self.starta = int(self.starta)
1142 self.startb = int(self.startb)
1142 self.startb = int(self.startb)
1143 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1143 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1144 self.b)
1144 self.b)
1145 # if we hit eof before finishing out the hunk, the last line will
1146 # be zero length. Let's try to fix it up.
1147 while len(self.hunk[-1]) == 0:
1147 while len(self.hunk[-1]) == 0:
1148 del self.hunk[-1]
1148 del self.hunk[-1]
1149 del self.a[-1]
1149 del self.a[-1]
1150 del self.b[-1]
1150 del self.b[-1]
1151 self.lena -= 1
1151 self.lena -= 1
1152 self.lenb -= 1
1152 self.lenb -= 1
1153 self._fixnewline(lr)
1153 self._fixnewline(lr)
1154
1154
1155 def read_context_hunk(self, lr):
1155 def read_context_hunk(self, lr):
1156 self.desc = lr.readline()
1156 self.desc = lr.readline()
1157 m = contextdesc.match(self.desc)
1157 m = contextdesc.match(self.desc)
1158 if not m:
1158 if not m:
1159 raise PatchError(_("bad hunk #%d") % self.number)
1159 raise PatchError(_("bad hunk #%d") % self.number)
1160 self.starta, aend = m.groups()
1160 self.starta, aend = m.groups()
1161 self.starta = int(self.starta)
1161 self.starta = int(self.starta)
1162 if aend is None:
1162 if aend is None:
1163 aend = self.starta
1163 aend = self.starta
1164 self.lena = int(aend) - self.starta
1164 self.lena = int(aend) - self.starta
1165 if self.starta:
1165 if self.starta:
1166 self.lena += 1
1166 self.lena += 1
1167 for x in xrange(self.lena):
1167 for x in xrange(self.lena):
1168 l = lr.readline()
1168 l = lr.readline()
1169 if l.startswith('---'):
1169 if l.startswith('---'):
1170 # line additions, old block is empty
1171 lr.push(l)
1171 lr.push(l)
1172 break
1172 break
1173 s = l[2:]
1173 s = l[2:]
1174 if l.startswith('- ') or l.startswith('! '):
1174 if l.startswith('- ') or l.startswith('! '):
1175 u = '-' + s
1175 u = '-' + s
1176 elif l.startswith(' '):
1176 elif l.startswith(' '):
1177 u = ' ' + s
1177 u = ' ' + s
1178 else:
1178 else:
1179 raise PatchError(_("bad hunk #%d old text line %d") %
1179 raise PatchError(_("bad hunk #%d old text line %d") %
1180 (self.number, x))
1180 (self.number, x))
1181 self.a.append(u)
1181 self.a.append(u)
1182 self.hunk.append(u)
1182 self.hunk.append(u)
1183
1183
1184 l = lr.readline()
1184 l = lr.readline()
1185 if l.startswith('\ '):
1185 if l.startswith('\ '):
1186 s = self.a[-1][:-1]
1186 s = self.a[-1][:-1]
1187 self.a[-1] = s
1187 self.a[-1] = s
1188 self.hunk[-1] = s
1188 self.hunk[-1] = s
1189 l = lr.readline()
1189 l = lr.readline()
1190 m = contextdesc.match(l)
1190 m = contextdesc.match(l)
1191 if not m:
1191 if not m:
1192 raise PatchError(_("bad hunk #%d") % self.number)
1192 raise PatchError(_("bad hunk #%d") % self.number)
1193 self.startb, bend = m.groups()
1193 self.startb, bend = m.groups()
1194 self.startb = int(self.startb)
1194 self.startb = int(self.startb)
1195 if bend is None:
1195 if bend is None:
1196 bend = self.startb
1196 bend = self.startb
1197 self.lenb = int(bend) - self.startb
1197 self.lenb = int(bend) - self.startb
1198 if self.startb:
1198 if self.startb:
1199 self.lenb += 1
1199 self.lenb += 1
1200 hunki = 1
1200 hunki = 1
1201 for x in xrange(self.lenb):
1201 for x in xrange(self.lenb):
1202 l = lr.readline()
1202 l = lr.readline()
1203 if l.startswith('\ '):
1203 if l.startswith('\ '):
1204 # XXX: the only way to hit this is with an invalid line range.
1205 # The no-eol marker is not counted in the line range, but I
1206 # guess there are diff(1) implementations out there which behave differently.
1207 s = self.b[-1][:-1]
1207 s = self.b[-1][:-1]
1208 self.b[-1] = s
1208 self.b[-1] = s
1209 self.hunk[hunki - 1] = s
1209 self.hunk[hunki - 1] = s
1210 continue
1210 continue
1211 if not l:
1211 if not l:
1212 # line deletions, new block is empty and we hit EOF
1212 # line deletions, new block is empty and we hit EOF
1213 lr.push(l)
1213 lr.push(l)
1214 break
1214 break
1215 s = l[2:]
1215 s = l[2:]
1216 if l.startswith('+ ') or l.startswith('! '):
1216 if l.startswith('+ ') or l.startswith('! '):
1217 u = '+' + s
1217 u = '+' + s
1218 elif l.startswith(' '):
1218 elif l.startswith(' '):
1219 u = ' ' + s
1219 u = ' ' + s
1220 elif len(self.b) == 0:
1220 elif len(self.b) == 0:
1221 # line deletions, new block is empty
1221 # line deletions, new block is empty
1222 lr.push(l)
1222 lr.push(l)
1223 break
1223 break
1224 else:
1224 else:
1225 raise PatchError(_("bad hunk #%d old text line %d") %
1225 raise PatchError(_("bad hunk #%d old text line %d") %
1226 (self.number, x))
1226 (self.number, x))
1227 self.b.append(s)
1227 self.b.append(s)
1228 while True:
1228 while True:
1229 if hunki >= len(self.hunk):
1229 if hunki >= len(self.hunk):
1230 h = ""
1230 h = ""
1231 else:
1231 else:
1232 h = self.hunk[hunki]
1232 h = self.hunk[hunki]
1233 hunki += 1
1233 hunki += 1
1234 if h == u:
1234 if h == u:
1235 break
1235 break
1236 elif h.startswith('-'):
1236 elif h.startswith('-'):
1237 continue
1237 continue
1238 else:
1238 else:
1239 self.hunk.insert(hunki - 1, u)
1239 self.hunk.insert(hunki - 1, u)
1240 break
1240 break
1241
1241
1242 if not self.a:
1242 if not self.a:
1243 # this happens when lines were only added to the hunk
1243 # this happens when lines were only added to the hunk
1244 for x in self.hunk:
1244 for x in self.hunk:
1245 if x.startswith('-') or x.startswith(' '):
1245 if x.startswith('-') or x.startswith(' '):
1246 self.a.append(x)
1246 self.a.append(x)
1247 if not self.b:
1247 if not self.b:
1248 # this happens when lines were only deleted from the hunk
1248 # this happens when lines were only deleted from the hunk
1249 for x in self.hunk:
1249 for x in self.hunk:
1250 if x.startswith('+') or x.startswith(' '):
1250 if x.startswith('+') or x.startswith(' '):
1251 self.b.append(x[1:])
1251 self.b.append(x[1:])
1252 # @@ -start,len +start,len @@
1252 # @@ -start,len +start,len @@
1253 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1253 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1254 self.startb, self.lenb)
1254 self.startb, self.lenb)
1255 self.hunk[0] = self.desc
1255 self.hunk[0] = self.desc
1256 self._fixnewline(lr)
1256 self._fixnewline(lr)
1257
1257
1258 def _fixnewline(self, lr):
1258 def _fixnewline(self, lr):
1259 l = lr.readline()
1259 l = lr.readline()
1260 if l.startswith('\ '):
1260 if l.startswith('\ '):
1261 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1261 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1262 else:
1262 else:
1263 lr.push(l)
1263 lr.push(l)
1264
1264
1265 def complete(self):
1265 def complete(self):
1266 return len(self.a) == self.lena and len(self.b) == self.lenb
1266 return len(self.a) == self.lena and len(self.b) == self.lenb
1267
1267
1268 def _fuzzit(self, old, new, fuzz, toponly):
1268 def _fuzzit(self, old, new, fuzz, toponly):
1269 # this removes context lines from the top and bottom of the 'old' and
1270 # 'new' lists. It checks the hunk to make sure only context lines are
1271 # removed, then returns the shortened lists and the top trim count.
1272 fuzz = min(fuzz, len(old))
1272 fuzz = min(fuzz, len(old))
1273 if fuzz:
1273 if fuzz:
1274 top = 0
1274 top = 0
1275 bot = 0
1275 bot = 0
1276 hlen = len(self.hunk)
1276 hlen = len(self.hunk)
1277 for x in xrange(hlen - 1):
1277 for x in xrange(hlen - 1):
1278 # the hunk starts with the @@ line, so use x+1
1278 # the hunk starts with the @@ line, so use x+1
1279 if self.hunk[x + 1][0] == ' ':
1279 if self.hunk[x + 1][0] == ' ':
1280 top += 1
1280 top += 1
1281 else:
1281 else:
1282 break
1282 break
1283 if not toponly:
1283 if not toponly:
1284 for x in xrange(hlen - 1):
1284 for x in xrange(hlen - 1):
1285 if self.hunk[hlen - bot - 1][0] == ' ':
1285 if self.hunk[hlen - bot - 1][0] == ' ':
1286 bot += 1
1286 bot += 1
1287 else:
1287 else:
1288 break
1288 break
1289
1289
1290 bot = min(fuzz, bot)
1290 bot = min(fuzz, bot)
1291 top = min(fuzz, top)
1291 top = min(fuzz, top)
1292 return old[top:len(old) - bot], new[top:len(new) - bot], top
1292 return old[top:len(old) - bot], new[top:len(new) - bot], top
1293 return old, new, 0
1293 return old, new, 0
1294
1294
1295 def fuzzit(self, fuzz, toponly):
1295 def fuzzit(self, fuzz, toponly):
1296 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1296 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1297 oldstart = self.starta + top
1297 oldstart = self.starta + top
1298 newstart = self.startb + top
1298 newstart = self.startb + top
1299 # zero length hunk ranges already have their start decremented
1299 # zero length hunk ranges already have their start decremented
1300 if self.lena and oldstart > 0:
1300 if self.lena and oldstart > 0:
1301 oldstart -= 1
1301 oldstart -= 1
1302 if self.lenb and newstart > 0:
1302 if self.lenb and newstart > 0:
1303 newstart -= 1
1303 newstart -= 1
1304 return old, oldstart, new, newstart
1304 return old, oldstart, new, newstart
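# Rough illustration (hypothetical hunk, not from the original source): with
# fuzz=1 and a hunk whose first and last lines are context, _fuzzit() drops
# one line from each end and reports top=1, so the start lines above advance
# by one before the usual zero-length-range adjustment.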
1305
1305
1306 class binhunk(object):
1306 class binhunk(object):
1307 'A binary patch file.'
1307 'A binary patch file.'
1308 def __init__(self, lr, fname):
1308 def __init__(self, lr, fname):
1309 self.text = None
1309 self.text = None
1310 self.delta = False
1310 self.delta = False
1311 self.hunk = ['GIT binary patch\n']
1311 self.hunk = ['GIT binary patch\n']
1312 self._fname = fname
1312 self._fname = fname
1313 self._read(lr)
1313 self._read(lr)
1314
1314
1315 def complete(self):
1315 def complete(self):
1316 return self.text is not None
1316 return self.text is not None
1317
1317
1318 def new(self, lines):
1318 def new(self, lines):
1319 if self.delta:
1319 if self.delta:
1320 return [applybindelta(self.text, ''.join(lines))]
1320 return [applybindelta(self.text, ''.join(lines))]
1321 return [self.text]
1321 return [self.text]
1322
1322
1323 def _read(self, lr):
1323 def _read(self, lr):
1324 def getline(lr, hunk):
1324 def getline(lr, hunk):
1325 l = lr.readline()
1325 l = lr.readline()
1326 hunk.append(l)
1326 hunk.append(l)
1327 return l.rstrip('\r\n')
1327 return l.rstrip('\r\n')
1328
1328
1329 size = 0
1329 size = 0
1330 while True:
1330 while True:
1331 line = getline(lr, self.hunk)
1331 line = getline(lr, self.hunk)
1332 if not line:
1332 if not line:
1333 raise PatchError(_('could not extract "%s" binary data')
1333 raise PatchError(_('could not extract "%s" binary data')
1334 % self._fname)
1334 % self._fname)
1335 if line.startswith('literal '):
1335 if line.startswith('literal '):
1336 size = int(line[8:].rstrip())
1336 size = int(line[8:].rstrip())
1337 break
1337 break
1338 if line.startswith('delta '):
1338 if line.startswith('delta '):
1339 size = int(line[6:].rstrip())
1339 size = int(line[6:].rstrip())
1340 self.delta = True
1340 self.delta = True
1341 break
1341 break
1342 dec = []
1342 dec = []
1343 line = getline(lr, self.hunk)
1343 line = getline(lr, self.hunk)
1344 while len(line) > 1:
1344 while len(line) > 1:
1345 l = line[0]
1345 l = line[0]
1346 if l <= 'Z' and l >= 'A':
1346 if l <= 'Z' and l >= 'A':
1347 l = ord(l) - ord('A') + 1
1347 l = ord(l) - ord('A') + 1
1348 else:
1348 else:
1349 l = ord(l) - ord('a') + 27
1349 l = ord(l) - ord('a') + 27
1350 try:
1350 try:
1351 dec.append(base85.b85decode(line[1:])[:l])
1351 dec.append(base85.b85decode(line[1:])[:l])
1352 except ValueError, e:
1352 except ValueError, e:
1353 raise PatchError(_('could not decode "%s" binary patch: %s')
1353 raise PatchError(_('could not decode "%s" binary patch: %s')
1354 % (self._fname, str(e)))
1354 % (self._fname, str(e)))
1355 line = getline(lr, self.hunk)
1355 line = getline(lr, self.hunk)
1356 text = zlib.decompress(''.join(dec))
1356 text = zlib.decompress(''.join(dec))
1357 if len(text) != size:
1357 if len(text) != size:
1358 raise PatchError(_('"%s" length is %d bytes, should be %d')
1358 raise PatchError(_('"%s" length is %d bytes, should be %d')
1359 % (self._fname, len(text), size))
1359 % (self._fname, len(text), size))
1360 self.text = text
1360 self.text = text
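# Format recap (derived from the loop above, not from external docs): each
# data line of a "GIT binary patch" starts with a length character -- 'A'..'Z'
# meaning 1..26 decoded bytes, 'a'..'z' meaning 27..52 -- followed by base85
# text; the concatenated payload is zlib-compressed and must inflate to the
# size announced on the 'literal'/'delta' line.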
1361
1361
1362 def parsefilename(str):
1362 def parsefilename(str):
1363 # --- filename \t|space stuff
1363 # --- filename \t|space stuff
1364 s = str[4:].rstrip('\r\n')
1364 s = str[4:].rstrip('\r\n')
1365 i = s.find('\t')
1365 i = s.find('\t')
1366 if i < 0:
1366 if i < 0:
1367 i = s.find(' ')
1367 i = s.find(' ')
1368 if i < 0:
1368 if i < 0:
1369 return s
1369 return s
1370 return s[:i]
1370 return s[:i]
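# Illustrative cases (made-up file names): the leading '--- '/'+++ ' marker is
# dropped and everything from the first tab or space onwards, usually a
# timestamp, is ignored.
#
#   >>> parsefilename('--- a/hello.c\tMon Feb 10 10:22:37 2014\n')
#   'a/hello.c'
#   >>> parsefilename('+++ b/hello.c\n')
#   'b/hello.c'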
1371
1371
1372 def parsepatch(originalchunks):
1372 def parsepatch(originalchunks):
1373 """patch -> [] of headers -> [] of hunks """
1373 """patch -> [] of headers -> [] of hunks """
1374 class parser(object):
1374 class parser(object):
1375 """patch parsing state machine"""
1375 """patch parsing state machine"""
1376 def __init__(self):
1376 def __init__(self):
1377 self.fromline = 0
1377 self.fromline = 0
1378 self.toline = 0
1378 self.toline = 0
1379 self.proc = ''
1379 self.proc = ''
1380 self.header = None
1380 self.header = None
1381 self.context = []
1381 self.context = []
1382 self.before = []
1382 self.before = []
1383 self.hunk = []
1383 self.hunk = []
1384 self.headers = []
1384 self.headers = []
1385
1385
1386 def addrange(self, limits):
1386 def addrange(self, limits):
1387 fromstart, fromend, tostart, toend, proc = limits
1387 fromstart, fromend, tostart, toend, proc = limits
1388 self.fromline = int(fromstart)
1388 self.fromline = int(fromstart)
1389 self.toline = int(tostart)
1389 self.toline = int(tostart)
1390 self.proc = proc
1390 self.proc = proc
1391
1391
1392 def addcontext(self, context):
1392 def addcontext(self, context):
1393 if self.hunk:
1393 if self.hunk:
1394 h = recordhunk(self.header, self.fromline, self.toline,
1394 h = recordhunk(self.header, self.fromline, self.toline,
1395 self.proc, self.before, self.hunk, context)
1395 self.proc, self.before, self.hunk, context)
1396 self.header.hunks.append(h)
1396 self.header.hunks.append(h)
1397 self.fromline += len(self.before) + h.removed
1397 self.fromline += len(self.before) + h.removed
1398 self.toline += len(self.before) + h.added
1398 self.toline += len(self.before) + h.added
1399 self.before = []
1399 self.before = []
1400 self.hunk = []
1400 self.hunk = []
1401 self.proc = ''
1401 self.proc = ''
1402 self.context = context
1402 self.context = context
1403
1403
1404 def addhunk(self, hunk):
1404 def addhunk(self, hunk):
1405 if self.context:
1405 if self.context:
1406 self.before = self.context
1406 self.before = self.context
1407 self.context = []
1407 self.context = []
1408 self.hunk = hunk
1408 self.hunk = hunk
1409
1409
1410 def newfile(self, hdr):
1410 def newfile(self, hdr):
1411 self.addcontext([])
1411 self.addcontext([])
1412 h = header(hdr)
1412 h = header(hdr)
1413 self.headers.append(h)
1413 self.headers.append(h)
1414 self.header = h
1414 self.header = h
1415
1415
1416 def addother(self, line):
1416 def addother(self, line):
1417 pass # 'other' lines are ignored
1417 pass # 'other' lines are ignored
1418
1418
1419 def finished(self):
1419 def finished(self):
1420 self.addcontext([])
1420 self.addcontext([])
1421 return self.headers
1421 return self.headers
1422
1422
1423 transitions = {
1423 transitions = {
1424 'file': {'context': addcontext,
1424 'file': {'context': addcontext,
1425 'file': newfile,
1425 'file': newfile,
1426 'hunk': addhunk,
1426 'hunk': addhunk,
1427 'range': addrange},
1427 'range': addrange},
1428 'context': {'file': newfile,
1428 'context': {'file': newfile,
1429 'hunk': addhunk,
1429 'hunk': addhunk,
1430 'range': addrange,
1430 'range': addrange,
1431 'other': addother},
1431 'other': addother},
1432 'hunk': {'context': addcontext,
1432 'hunk': {'context': addcontext,
1433 'file': newfile,
1433 'file': newfile,
1434 'range': addrange},
1434 'range': addrange},
1435 'range': {'context': addcontext,
1435 'range': {'context': addcontext,
1436 'hunk': addhunk},
1436 'hunk': addhunk},
1437 'other': {'other': addother},
1437 'other': {'other': addother},
1438 }
1438 }
1439
1439
1440 p = parser()
1440 p = parser()
1441 fp = cStringIO.StringIO()
1441 fp = cStringIO.StringIO()
1442 fp.write(''.join(originalchunks))
1442 fp.write(''.join(originalchunks))
1443 fp.seek(0)
1443 fp.seek(0)
1444
1444
1445 state = 'context'
1445 state = 'context'
1446 for newstate, data in scanpatch(fp):
1446 for newstate, data in scanpatch(fp):
1447 try:
1447 try:
1448 p.transitions[state][newstate](p, data)
1448 p.transitions[state][newstate](p, data)
1449 except KeyError:
1449 except KeyError:
1450 raise PatchError('unhandled transition: %s -> %s' %
1450 raise PatchError('unhandled transition: %s -> %s' %
1451 (state, newstate))
1451 (state, newstate))
1452 state = newstate
1452 state = newstate
1453 del fp
1453 del fp
1454 return p.finished()
1454 return p.finished()
1455
1455
1456 def pathtransform(path, strip, prefix):
1456 def pathtransform(path, strip, prefix):
1457 '''turn a path from a patch into a path suitable for the repository
1457 '''turn a path from a patch into a path suitable for the repository
1458
1458
1459 prefix, if not empty, is expected to be normalized with a / at the end.
1459 prefix, if not empty, is expected to be normalized with a / at the end.
1460
1460
1461 Returns (stripped components, path in repository).
1461 Returns (stripped components, path in repository).
1462
1462
1463 >>> pathtransform('a/b/c', 0, '')
1463 >>> pathtransform('a/b/c', 0, '')
1464 ('', 'a/b/c')
1464 ('', 'a/b/c')
1465 >>> pathtransform(' a/b/c ', 0, '')
1465 >>> pathtransform(' a/b/c ', 0, '')
1466 ('', ' a/b/c')
1466 ('', ' a/b/c')
1467 >>> pathtransform(' a/b/c ', 2, '')
1467 >>> pathtransform(' a/b/c ', 2, '')
1468 ('a/b/', 'c')
1468 ('a/b/', 'c')
1469 >>> pathtransform('a/b/c', 0, 'd/e/')
1469 >>> pathtransform('a/b/c', 0, 'd/e/')
1470 ('', 'd/e/a/b/c')
1470 ('', 'd/e/a/b/c')
1471 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1471 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1472 ('a//b/', 'd/e/c')
1472 ('a//b/', 'd/e/c')
1473 >>> pathtransform('a/b/c', 3, '')
1473 >>> pathtransform('a/b/c', 3, '')
1474 Traceback (most recent call last):
1474 Traceback (most recent call last):
1475 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1475 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1476 '''
1476 '''
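# One more illustrative case (not part of the doctests above): strip=1
# mirrors 'patch -p1', removing a single leading path component.
#
#   >>> pathtransform('a/b/c', 1, '')
#   ('a/', 'b/c')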
1477 pathlen = len(path)
1477 pathlen = len(path)
1478 i = 0
1478 i = 0
1479 if strip == 0:
1479 if strip == 0:
1480 return '', prefix + path.rstrip()
1480 return '', prefix + path.rstrip()
1481 count = strip
1481 count = strip
1482 while count > 0:
1482 while count > 0:
1483 i = path.find('/', i)
1483 i = path.find('/', i)
1484 if i == -1:
1484 if i == -1:
1485 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1485 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1486 (count, strip, path))
1486 (count, strip, path))
1487 i += 1
1487 i += 1
1488 # consume '//' in the path
1488 # consume '//' in the path
1489 while i < pathlen - 1 and path[i] == '/':
1489 while i < pathlen - 1 and path[i] == '/':
1490 i += 1
1490 i += 1
1491 count -= 1
1491 count -= 1
1492 return path[:i].lstrip(), prefix + path[i:].rstrip()
1492 return path[:i].lstrip(), prefix + path[i:].rstrip()
1493
1493
1494 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1494 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1495 nulla = afile_orig == "/dev/null"
1495 nulla = afile_orig == "/dev/null"
1496 nullb = bfile_orig == "/dev/null"
1496 nullb = bfile_orig == "/dev/null"
1497 create = nulla and hunk.starta == 0 and hunk.lena == 0
1497 create = nulla and hunk.starta == 0 and hunk.lena == 0
1498 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1498 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1499 abase, afile = pathtransform(afile_orig, strip, prefix)
1499 abase, afile = pathtransform(afile_orig, strip, prefix)
1500 gooda = not nulla and backend.exists(afile)
1500 gooda = not nulla and backend.exists(afile)
1501 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1501 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1502 if afile == bfile:
1502 if afile == bfile:
1503 goodb = gooda
1503 goodb = gooda
1504 else:
1504 else:
1505 goodb = not nullb and backend.exists(bfile)
1505 goodb = not nullb and backend.exists(bfile)
1506 missing = not goodb and not gooda and not create
1506 missing = not goodb and not gooda and not create
1507
1507
1508 # some diff programs apparently produce patches where the afile is
1508 # some diff programs apparently produce patches where the afile is
1509 # not /dev/null, but afile starts with bfile
1509 # not /dev/null, but afile starts with bfile
1510 abasedir = afile[:afile.rfind('/') + 1]
1510 abasedir = afile[:afile.rfind('/') + 1]
1511 bbasedir = bfile[:bfile.rfind('/') + 1]
1511 bbasedir = bfile[:bfile.rfind('/') + 1]
1512 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1512 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1513 and hunk.starta == 0 and hunk.lena == 0):
1513 and hunk.starta == 0 and hunk.lena == 0):
1514 create = True
1514 create = True
1515 missing = False
1515 missing = False
1516
1516
1517 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1517 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1518 # diff is between a file and its backup. In this case, the original
1518 # diff is between a file and its backup. In this case, the original
1519 # file should be patched (see original mpatch code).
1519 # file should be patched (see original mpatch code).
1520 isbackup = (abase == bbase and bfile.startswith(afile))
1520 isbackup = (abase == bbase and bfile.startswith(afile))
1521 fname = None
1521 fname = None
1522 if not missing:
1522 if not missing:
1523 if gooda and goodb:
1523 if gooda and goodb:
1524 if isbackup:
1524 if isbackup:
1525 fname = afile
1525 fname = afile
1526 else:
1526 else:
1527 fname = bfile
1527 fname = bfile
1528 elif gooda:
1528 elif gooda:
1529 fname = afile
1529 fname = afile
1530
1530
1531 if not fname:
1531 if not fname:
1532 if not nullb:
1532 if not nullb:
1533 if isbackup:
1533 if isbackup:
1534 fname = afile
1534 fname = afile
1535 else:
1535 else:
1536 fname = bfile
1536 fname = bfile
1537 elif not nulla:
1537 elif not nulla:
1538 fname = afile
1538 fname = afile
1539 else:
1539 else:
1540 raise PatchError(_("undefined source and destination files"))
1540 raise PatchError(_("undefined source and destination files"))
1541
1541
1542 gp = patchmeta(fname)
1542 gp = patchmeta(fname)
1543 if create:
1543 if create:
1544 gp.op = 'ADD'
1544 gp.op = 'ADD'
1545 elif remove:
1545 elif remove:
1546 gp.op = 'DELETE'
1546 gp.op = 'DELETE'
1547 return gp
1547 return gp
1548
1548
1549 def scanpatch(fp):
1549 def scanpatch(fp):
1550 """like patch.iterhunks, but yield different events
1550 """like patch.iterhunks, but yield different events
1551
1551
1552 - ('file', [header_lines + fromfile + tofile])
1552 - ('file', [header_lines + fromfile + tofile])
1553 - ('context', [context_lines])
1553 - ('context', [context_lines])
1554 - ('hunk', [hunk_lines])
1554 - ('hunk', [hunk_lines])
1555 - ('range', (-start,len, +start,len, proc))
1555 - ('range', (-start,len, +start,len, proc))
1556 """
1556 """
1557 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1557 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1558 lr = linereader(fp)
1558 lr = linereader(fp)
1559
1559
1560 def scanwhile(first, p):
1560 def scanwhile(first, p):
1561 """scan lr while predicate holds"""
1561 """scan lr while predicate holds"""
1562 lines = [first]
1562 lines = [first]
1563 while True:
1563 while True:
1564 line = lr.readline()
1564 line = lr.readline()
1565 if not line:
1565 if not line:
1566 break
1566 break
1567 if p(line):
1567 if p(line):
1568 lines.append(line)
1568 lines.append(line)
1569 else:
1569 else:
1570 lr.push(line)
1570 lr.push(line)
1571 break
1571 break
1572 return lines
1572 return lines
1573
1573
1574 while True:
1574 while True:
1575 line = lr.readline()
1575 line = lr.readline()
1576 if not line:
1576 if not line:
1577 break
1577 break
1578 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1578 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1579 def notheader(line):
1579 def notheader(line):
1580 s = line.split(None, 1)
1580 s = line.split(None, 1)
1581 return not s or s[0] not in ('---', 'diff')
1581 return not s or s[0] not in ('---', 'diff')
1582 header = scanwhile(line, notheader)
1582 header = scanwhile(line, notheader)
1583 fromfile = lr.readline()
1583 fromfile = lr.readline()
1584 if fromfile.startswith('---'):
1584 if fromfile.startswith('---'):
1585 tofile = lr.readline()
1585 tofile = lr.readline()
1586 header += [fromfile, tofile]
1586 header += [fromfile, tofile]
1587 else:
1587 else:
1588 lr.push(fromfile)
1588 lr.push(fromfile)
1589 yield 'file', header
1589 yield 'file', header
1590 elif line[0] == ' ':
1590 elif line[0] == ' ':
1591 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1591 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1592 elif line[0] in '-+':
1592 elif line[0] in '-+':
1593 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1593 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1594 else:
1594 else:
1595 m = lines_re.match(line)
1595 m = lines_re.match(line)
1596 if m:
1596 if m:
1597 yield 'range', m.groups()
1597 yield 'range', m.groups()
1598 else:
1598 else:
1599 yield 'other', line
1599 yield 'other', line
1600
1600
1601 def scangitpatch(lr, firstline):
1601 def scangitpatch(lr, firstline):
1602 """
1602 """
1603 Git patches can emit:
1603 Git patches can emit:
1604 - rename a to b
1604 - rename a to b
1605 - change b
1605 - change b
1606 - copy a to c
1606 - copy a to c
1607 - change c
1607 - change c
1608
1608
1609 We cannot apply this sequence as-is: the renamed 'a' could not be
1610 found, because it would have been renamed already. And we cannot
1611 copy from 'b' instead, because 'b' would have been changed already.
1612 So we scan the git patch for copy and rename commands, which lets us
1613 perform the copies ahead of time.
1614 """
1614 """
1615 pos = 0
1615 pos = 0
1616 try:
1616 try:
1617 pos = lr.fp.tell()
1617 pos = lr.fp.tell()
1618 fp = lr.fp
1618 fp = lr.fp
1619 except IOError:
1619 except IOError:
1620 fp = cStringIO.StringIO(lr.fp.read())
1620 fp = cStringIO.StringIO(lr.fp.read())
1621 gitlr = linereader(fp)
1621 gitlr = linereader(fp)
1622 gitlr.push(firstline)
1622 gitlr.push(firstline)
1623 gitpatches = readgitpatch(gitlr)
1623 gitpatches = readgitpatch(gitlr)
1624 fp.seek(pos)
1624 fp.seek(pos)
1625 return gitpatches
1625 return gitpatches
1626
1626
1627 def iterhunks(fp):
1627 def iterhunks(fp):
1628 """Read a patch and yield the following events:
1628 """Read a patch and yield the following events:
1629 - ("file", afile, bfile, firsthunk): select a new target file.
1629 - ("file", afile, bfile, firsthunk): select a new target file.
1630 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1630 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1631 "file" event.
1631 "file" event.
1632 - ("git", gitchanges): current diff is in git format, gitchanges
1632 - ("git", gitchanges): current diff is in git format, gitchanges
1633 maps filenames to gitpatch records. Unique event.
1633 maps filenames to gitpatch records. Unique event.
1634 """
1634 """
1635 afile = ""
1635 afile = ""
1636 bfile = ""
1636 bfile = ""
1637 state = None
1637 state = None
1638 hunknum = 0
1638 hunknum = 0
1639 emitfile = newfile = False
1639 emitfile = newfile = False
1640 gitpatches = None
1640 gitpatches = None
1641
1641
1642 # our states
1642 # our states
1643 BFILE = 1
1643 BFILE = 1
1644 context = None
1644 context = None
1645 lr = linereader(fp)
1645 lr = linereader(fp)
1646
1646
1647 while True:
1647 while True:
1648 x = lr.readline()
1648 x = lr.readline()
1649 if not x:
1649 if not x:
1650 break
1650 break
1651 if state == BFILE and (
1651 if state == BFILE and (
1652 (not context and x[0] == '@')
1652 (not context and x[0] == '@')
1653 or (context is not False and x.startswith('***************'))
1653 or (context is not False and x.startswith('***************'))
1654 or x.startswith('GIT binary patch')):
1654 or x.startswith('GIT binary patch')):
1655 gp = None
1655 gp = None
1656 if (gitpatches and
1656 if (gitpatches and
1657 gitpatches[-1].ispatching(afile, bfile)):
1657 gitpatches[-1].ispatching(afile, bfile)):
1658 gp = gitpatches.pop()
1658 gp = gitpatches.pop()
1659 if x.startswith('GIT binary patch'):
1659 if x.startswith('GIT binary patch'):
1660 h = binhunk(lr, gp.path)
1660 h = binhunk(lr, gp.path)
1661 else:
1661 else:
1662 if context is None and x.startswith('***************'):
1662 if context is None and x.startswith('***************'):
1663 context = True
1663 context = True
1664 h = hunk(x, hunknum + 1, lr, context)
1664 h = hunk(x, hunknum + 1, lr, context)
1665 hunknum += 1
1665 hunknum += 1
1666 if emitfile:
1666 if emitfile:
1667 emitfile = False
1667 emitfile = False
1668 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1668 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1669 yield 'hunk', h
1669 yield 'hunk', h
1670 elif x.startswith('diff --git a/'):
1670 elif x.startswith('diff --git a/'):
1671 m = gitre.match(x.rstrip(' \r\n'))
1671 m = gitre.match(x.rstrip(' \r\n'))
1672 if not m:
1672 if not m:
1673 continue
1673 continue
1674 if gitpatches is None:
1674 if gitpatches is None:
1675 # scan whole input for git metadata
1675 # scan whole input for git metadata
1676 gitpatches = scangitpatch(lr, x)
1676 gitpatches = scangitpatch(lr, x)
1677 yield 'git', [g.copy() for g in gitpatches
1677 yield 'git', [g.copy() for g in gitpatches
1678 if g.op in ('COPY', 'RENAME')]
1678 if g.op in ('COPY', 'RENAME')]
1679 gitpatches.reverse()
1679 gitpatches.reverse()
1680 afile = 'a/' + m.group(1)
1680 afile = 'a/' + m.group(1)
1681 bfile = 'b/' + m.group(2)
1681 bfile = 'b/' + m.group(2)
1682 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1682 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1683 gp = gitpatches.pop()
1683 gp = gitpatches.pop()
1684 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1684 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1685 if not gitpatches:
1685 if not gitpatches:
1686 raise PatchError(_('failed to synchronize metadata for "%s"')
1686 raise PatchError(_('failed to synchronize metadata for "%s"')
1687 % afile[2:])
1687 % afile[2:])
1688 gp = gitpatches[-1]
1688 gp = gitpatches[-1]
1689 newfile = True
1689 newfile = True
1690 elif x.startswith('---'):
1690 elif x.startswith('---'):
1691 # check for a unified diff
1691 # check for a unified diff
1692 l2 = lr.readline()
1692 l2 = lr.readline()
1693 if not l2.startswith('+++'):
1693 if not l2.startswith('+++'):
1694 lr.push(l2)
1694 lr.push(l2)
1695 continue
1695 continue
1696 newfile = True
1696 newfile = True
1697 context = False
1697 context = False
1698 afile = parsefilename(x)
1698 afile = parsefilename(x)
1699 bfile = parsefilename(l2)
1699 bfile = parsefilename(l2)
1700 elif x.startswith('***'):
1700 elif x.startswith('***'):
1701 # check for a context diff
1701 # check for a context diff
1702 l2 = lr.readline()
1702 l2 = lr.readline()
1703 if not l2.startswith('---'):
1703 if not l2.startswith('---'):
1704 lr.push(l2)
1704 lr.push(l2)
1705 continue
1705 continue
1706 l3 = lr.readline()
1706 l3 = lr.readline()
1707 lr.push(l3)
1707 lr.push(l3)
1708 if not l3.startswith("***************"):
1708 if not l3.startswith("***************"):
1709 lr.push(l2)
1709 lr.push(l2)
1710 continue
1710 continue
1711 newfile = True
1711 newfile = True
1712 context = True
1712 context = True
1713 afile = parsefilename(x)
1713 afile = parsefilename(x)
1714 bfile = parsefilename(l2)
1714 bfile = parsefilename(l2)
1715
1715
1716 if newfile:
1716 if newfile:
1717 newfile = False
1717 newfile = False
1718 emitfile = True
1718 emitfile = True
1719 state = BFILE
1719 state = BFILE
1720 hunknum = 0
1720 hunknum = 0
1721
1721
1722 while gitpatches:
1722 while gitpatches:
1723 gp = gitpatches.pop()
1723 gp = gitpatches.pop()
1724 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1724 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1725
1725
1726 def applybindelta(binchunk, data):
1726 def applybindelta(binchunk, data):
1727 """Apply a binary delta hunk
1727 """Apply a binary delta hunk
1728 The algorithm used is the algorithm from git's patch-delta.c
1728 The algorithm used is the algorithm from git's patch-delta.c
1729 """
1729 """
1730 def deltahead(binchunk):
1730 def deltahead(binchunk):
1731 i = 0
1731 i = 0
1732 for c in binchunk:
1732 for c in binchunk:
1733 i += 1
1733 i += 1
1734 if not (ord(c) & 0x80):
1734 if not (ord(c) & 0x80):
1735 return i
1735 return i
1736 return i
1736 return i
1737 out = ""
1737 out = ""
1738 s = deltahead(binchunk)
1738 s = deltahead(binchunk)
1739 binchunk = binchunk[s:]
1739 binchunk = binchunk[s:]
1740 s = deltahead(binchunk)
1740 s = deltahead(binchunk)
1741 binchunk = binchunk[s:]
1741 binchunk = binchunk[s:]
1742 i = 0
1742 i = 0
1743 while i < len(binchunk):
1743 while i < len(binchunk):
1744 cmd = ord(binchunk[i])
1744 cmd = ord(binchunk[i])
1745 i += 1
1745 i += 1
1746 if (cmd & 0x80):
1746 if (cmd & 0x80):
1747 offset = 0
1747 offset = 0
1748 size = 0
1748 size = 0
1749 if (cmd & 0x01):
1749 if (cmd & 0x01):
1750 offset = ord(binchunk[i])
1750 offset = ord(binchunk[i])
1751 i += 1
1751 i += 1
1752 if (cmd & 0x02):
1752 if (cmd & 0x02):
1753 offset |= ord(binchunk[i]) << 8
1753 offset |= ord(binchunk[i]) << 8
1754 i += 1
1754 i += 1
1755 if (cmd & 0x04):
1755 if (cmd & 0x04):
1756 offset |= ord(binchunk[i]) << 16
1756 offset |= ord(binchunk[i]) << 16
1757 i += 1
1757 i += 1
1758 if (cmd & 0x08):
1758 if (cmd & 0x08):
1759 offset |= ord(binchunk[i]) << 24
1759 offset |= ord(binchunk[i]) << 24
1760 i += 1
1760 i += 1
1761 if (cmd & 0x10):
1761 if (cmd & 0x10):
1762 size = ord(binchunk[i])
1762 size = ord(binchunk[i])
1763 i += 1
1763 i += 1
1764 if (cmd & 0x20):
1764 if (cmd & 0x20):
1765 size |= ord(binchunk[i]) << 8
1765 size |= ord(binchunk[i]) << 8
1766 i += 1
1766 i += 1
1767 if (cmd & 0x40):
1767 if (cmd & 0x40):
1768 size |= ord(binchunk[i]) << 16
1768 size |= ord(binchunk[i]) << 16
1769 i += 1
1769 i += 1
1770 if size == 0:
1770 if size == 0:
1771 size = 0x10000
1771 size = 0x10000
1772 offset_end = offset + size
1772 offset_end = offset + size
1773 out += data[offset:offset_end]
1773 out += data[offset:offset_end]
1774 elif cmd != 0:
1774 elif cmd != 0:
1775 offset_end = i + cmd
1775 offset_end = i + cmd
1776 out += binchunk[i:offset_end]
1776 out += binchunk[i:offset_end]
1777 i += cmd
1777 i += cmd
1778 else:
1778 else:
1779 raise PatchError(_('unexpected delta opcode 0'))
1779 raise PatchError(_('unexpected delta opcode 0'))
1780 return out
1780 return out
1781
1781
1782 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1782 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1783 """Reads a patch from fp and tries to apply it.
1783 """Reads a patch from fp and tries to apply it.
1784
1784
1785 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1785 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1786 there was any fuzz.
1786 there was any fuzz.
1787
1787
1788 If 'eolmode' is 'strict', the patch content and patched file are
1788 If 'eolmode' is 'strict', the patch content and patched file are
1789 read in binary mode. Otherwise, line endings are ignored when
1789 read in binary mode. Otherwise, line endings are ignored when
1790 patching and then normalized according to 'eolmode'.
1791 """
1791 """
1792 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1792 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1793 prefix=prefix, eolmode=eolmode)
1793 prefix=prefix, eolmode=eolmode)
1794
1794
1795 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1795 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1796 eolmode='strict'):
1796 eolmode='strict'):
1797
1797
1798 if prefix:
1798 if prefix:
1799 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1799 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1800 prefix)
1800 prefix)
1801 if prefix != '':
1801 if prefix != '':
1802 prefix += '/'
1802 prefix += '/'
1803 def pstrip(p):
1803 def pstrip(p):
1804 return pathtransform(p, strip - 1, prefix)[1]
1804 return pathtransform(p, strip - 1, prefix)[1]
1805
1805
1806 rejects = 0
1806 rejects = 0
1807 err = 0
1807 err = 0
1808 current_file = None
1808 current_file = None
1809
1809
1810 for state, values in iterhunks(fp):
1810 for state, values in iterhunks(fp):
1811 if state == 'hunk':
1811 if state == 'hunk':
1812 if not current_file:
1812 if not current_file:
1813 continue
1813 continue
1814 ret = current_file.apply(values)
1814 ret = current_file.apply(values)
1815 if ret > 0:
1815 if ret > 0:
1816 err = 1
1816 err = 1
1817 elif state == 'file':
1817 elif state == 'file':
1818 if current_file:
1818 if current_file:
1819 rejects += current_file.close()
1819 rejects += current_file.close()
1820 current_file = None
1820 current_file = None
1821 afile, bfile, first_hunk, gp = values
1821 afile, bfile, first_hunk, gp = values
1822 if gp:
1822 if gp:
1823 gp.path = pstrip(gp.path)
1823 gp.path = pstrip(gp.path)
1824 if gp.oldpath:
1824 if gp.oldpath:
1825 gp.oldpath = pstrip(gp.oldpath)
1825 gp.oldpath = pstrip(gp.oldpath)
1826 else:
1826 else:
1827 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1827 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1828 prefix)
1828 prefix)
1829 if gp.op == 'RENAME':
1829 if gp.op == 'RENAME':
1830 backend.unlink(gp.oldpath)
1830 backend.unlink(gp.oldpath)
1831 if not first_hunk:
1831 if not first_hunk:
1832 if gp.op == 'DELETE':
1832 if gp.op == 'DELETE':
1833 backend.unlink(gp.path)
1833 backend.unlink(gp.path)
1834 continue
1834 continue
1835 data, mode = None, None
1835 data, mode = None, None
1836 if gp.op in ('RENAME', 'COPY'):
1836 if gp.op in ('RENAME', 'COPY'):
1837 data, mode = store.getfile(gp.oldpath)[:2]
1837 data, mode = store.getfile(gp.oldpath)[:2]
1838 # FIXME: failing getfile has never been handled here
1838 # FIXME: failing getfile has never been handled here
1839 assert data is not None
1839 assert data is not None
1840 if gp.mode:
1840 if gp.mode:
1841 mode = gp.mode
1841 mode = gp.mode
1842 if gp.op == 'ADD':
1842 if gp.op == 'ADD':
1843 # Added files without content have no hunk and
1843 # Added files without content have no hunk and
1844 # must be created
1844 # must be created
1845 data = ''
1845 data = ''
1846 if data or mode:
1846 if data or mode:
1847 if (gp.op in ('ADD', 'RENAME', 'COPY')
1847 if (gp.op in ('ADD', 'RENAME', 'COPY')
1848 and backend.exists(gp.path)):
1848 and backend.exists(gp.path)):
1849 raise PatchError(_("cannot create %s: destination "
1849 raise PatchError(_("cannot create %s: destination "
1850 "already exists") % gp.path)
1850 "already exists") % gp.path)
1851 backend.setfile(gp.path, data, mode, gp.oldpath)
1851 backend.setfile(gp.path, data, mode, gp.oldpath)
1852 continue
1852 continue
1853 try:
1853 try:
1854 current_file = patcher(ui, gp, backend, store,
1854 current_file = patcher(ui, gp, backend, store,
1855 eolmode=eolmode)
1855 eolmode=eolmode)
1856 except PatchError, inst:
1856 except PatchError, inst:
1857 ui.warn(str(inst) + '\n')
1857 ui.warn(str(inst) + '\n')
1858 current_file = None
1858 current_file = None
1859 rejects += 1
1859 rejects += 1
1860 continue
1860 continue
1861 elif state == 'git':
1861 elif state == 'git':
1862 for gp in values:
1862 for gp in values:
1863 path = pstrip(gp.oldpath)
1863 path = pstrip(gp.oldpath)
1864 data, mode = backend.getfile(path)
1864 data, mode = backend.getfile(path)
1865 if data is None:
1865 if data is None:
1866 # The error ignored here will trigger a getfile()
1866 # The error ignored here will trigger a getfile()
1867 # error in a place more appropriate for error
1867 # error in a place more appropriate for error
1868 # handling, and will not interrupt the patching
1868 # handling, and will not interrupt the patching
1869 # process.
1869 # process.
1870 pass
1870 pass
1871 else:
1871 else:
1872 store.setfile(path, data, mode)
1872 store.setfile(path, data, mode)
1873 else:
1873 else:
1874 raise util.Abort(_('unsupported parser state: %s') % state)
1874 raise util.Abort(_('unsupported parser state: %s') % state)
1875
1875
1876 if current_file:
1876 if current_file:
1877 rejects += current_file.close()
1877 rejects += current_file.close()
1878
1878
1879 if rejects:
1879 if rejects:
1880 return -1
1880 return -1
1881 return err
1881 return err
1882
1882
1883 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1883 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1884 similarity):
1884 similarity):
1885 """use <patcher> to apply <patchname> to the working directory.
1885 """use <patcher> to apply <patchname> to the working directory.
1886 returns whether patch was applied with fuzz factor."""
1886 returns whether patch was applied with fuzz factor."""
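# For a repository rooted at /repo, patcher='patch', strip=1 and a made-up
# patch name, the command spawned below is roughly (exact quoting depends on
# the platform):
#
#   patch -d '/repo' -p1 < 'fix.diff'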
1887
1887
1888 fuzz = False
1888 fuzz = False
1889 args = []
1889 args = []
1890 cwd = repo.root
1890 cwd = repo.root
1891 if cwd:
1891 if cwd:
1892 args.append('-d %s' % util.shellquote(cwd))
1892 args.append('-d %s' % util.shellquote(cwd))
1893 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1893 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1894 util.shellquote(patchname)))
1894 util.shellquote(patchname)))
1895 try:
1895 try:
1896 for line in fp:
1896 for line in fp:
1897 line = line.rstrip()
1897 line = line.rstrip()
1898 ui.note(line + '\n')
1898 ui.note(line + '\n')
1899 if line.startswith('patching file '):
1899 if line.startswith('patching file '):
1900 pf = util.parsepatchoutput(line)
1900 pf = util.parsepatchoutput(line)
1901 printed_file = False
1901 printed_file = False
1902 files.add(pf)
1902 files.add(pf)
1903 elif line.find('with fuzz') >= 0:
1903 elif line.find('with fuzz') >= 0:
1904 fuzz = True
1904 fuzz = True
1905 if not printed_file:
1905 if not printed_file:
1906 ui.warn(pf + '\n')
1906 ui.warn(pf + '\n')
1907 printed_file = True
1907 printed_file = True
1908 ui.warn(line + '\n')
1908 ui.warn(line + '\n')
1909 elif line.find('saving rejects to file') >= 0:
1909 elif line.find('saving rejects to file') >= 0:
1910 ui.warn(line + '\n')
1910 ui.warn(line + '\n')
1911 elif line.find('FAILED') >= 0:
1911 elif line.find('FAILED') >= 0:
1912 if not printed_file:
1912 if not printed_file:
1913 ui.warn(pf + '\n')
1913 ui.warn(pf + '\n')
1914 printed_file = True
1914 printed_file = True
1915 ui.warn(line + '\n')
1915 ui.warn(line + '\n')
1916 finally:
1916 finally:
1917 if files:
1917 if files:
1918 scmutil.marktouched(repo, files, similarity)
1918 scmutil.marktouched(repo, files, similarity)
1919 code = fp.close()
1919 code = fp.close()
1920 if code:
1920 if code:
1921 raise PatchError(_("patch command failed: %s") %
1921 raise PatchError(_("patch command failed: %s") %
1922 util.explainexit(code)[0])
1922 util.explainexit(code)[0])
1923 return fuzz
1923 return fuzz
1924
1924
1925 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
1925 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
1926 eolmode='strict'):
1926 eolmode='strict'):
1927 if files is None:
1927 if files is None:
1928 files = set()
1928 files = set()
1929 if eolmode is None:
1929 if eolmode is None:
1930 eolmode = ui.config('patch', 'eol', 'strict')
1930 eolmode = ui.config('patch', 'eol', 'strict')
1931 if eolmode.lower() not in eolmodes:
1931 if eolmode.lower() not in eolmodes:
1932 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1932 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1933 eolmode = eolmode.lower()
1933 eolmode = eolmode.lower()
1934
1934
1935 store = filestore()
1935 store = filestore()
1936 try:
1936 try:
1937 fp = open(patchobj, 'rb')
1937 fp = open(patchobj, 'rb')
1938 except TypeError:
1938 except TypeError:
1939 fp = patchobj
1939 fp = patchobj
1940 try:
1940 try:
1941 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
1941 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
1942 eolmode=eolmode)
1942 eolmode=eolmode)
1943 finally:
1943 finally:
1944 if fp != patchobj:
1944 if fp != patchobj:
1945 fp.close()
1945 fp.close()
1946 files.update(backend.close())
1946 files.update(backend.close())
1947 store.close()
1947 store.close()
1948 if ret < 0:
1948 if ret < 0:
1949 raise PatchError(_('patch failed to apply'))
1949 raise PatchError(_('patch failed to apply'))
1950 return ret > 0
1950 return ret > 0
1951
1951
1952 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
1952 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
1953 eolmode='strict', similarity=0):
1953 eolmode='strict', similarity=0):
1954 """use builtin patch to apply <patchobj> to the working directory.
1954 """use builtin patch to apply <patchobj> to the working directory.
1955 returns whether patch was applied with fuzz factor."""
1955 returns whether patch was applied with fuzz factor."""
1956 backend = workingbackend(ui, repo, similarity)
1956 backend = workingbackend(ui, repo, similarity)
1957 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1957 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1958
1958
1959 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
1959 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
1960 eolmode='strict'):
1960 eolmode='strict'):
1961 backend = repobackend(ui, repo, ctx, store)
1961 backend = repobackend(ui, repo, ctx, store)
1962 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1962 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1963
1963
1964 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
1964 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
1965 similarity=0):
1965 similarity=0):
1966 """Apply <patchname> to the working directory.
1966 """Apply <patchname> to the working directory.
1967
1967
1968 'eolmode' specifies how line endings should be handled. It can be:
1969 - 'strict': inputs are read in binary mode, EOLs are preserved
1969 - 'strict': inputs are read in binary mode, EOLs are preserved
1970 - 'crlf': EOLs are ignored when patching and reset to CRLF
1970 - 'crlf': EOLs are ignored when patching and reset to CRLF
1971 - 'lf': EOLs are ignored when patching and reset to LF
1971 - 'lf': EOLs are ignored when patching and reset to LF
1972 - None: get it from user settings, default to 'strict'
1972 - None: get it from user settings, default to 'strict'
1973 'eolmode' is ignored when using an external patcher program.
1973 'eolmode' is ignored when using an external patcher program.
1974
1974
1975 Returns whether patch was applied with fuzz factor.
1975 Returns whether patch was applied with fuzz factor.
1976 """
1976 """
1977 patcher = ui.config('ui', 'patch')
1977 patcher = ui.config('ui', 'patch')
1978 if files is None:
1978 if files is None:
1979 files = set()
1979 files = set()
1980 if patcher:
1980 if patcher:
1981 return _externalpatch(ui, repo, patcher, patchname, strip,
1981 return _externalpatch(ui, repo, patcher, patchname, strip,
1982 files, similarity)
1982 files, similarity)
1983 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
1983 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
1984 similarity)
1984 similarity)
1985
1985
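As a hedged usage sketch (not part of this change): the ui/repo construction and the patch path below are illustrative assumptions, but the patch() signature and return value are exactly as defined above.

    # Minimal sketch: apply a patch file to the working directory and report
    # whether any hunk needed fuzz. The ui/repo setup is illustrative only.
    from mercurial import ui as uimod, hg, patch as patchmod

    ui = uimod.ui()
    repo = hg.repository(ui, '.')            # assumes cwd is a repository root
    touched = set()
    fuzz = patchmod.patch(ui, repo, '/tmp/fix.patch', strip=1,
                          files=touched, eolmode=None)  # None: honor [patch] eol
    if fuzz:
        ui.warn('patch applied with fuzz\n')
    ui.write('touched: %s\n' % ', '.join(sorted(touched)))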
1986 def changedfiles(ui, repo, patchpath, strip=1):
1986 def changedfiles(ui, repo, patchpath, strip=1):
1987 backend = fsbackend(ui, repo.root)
1987 backend = fsbackend(ui, repo.root)
1988 fp = open(patchpath, 'rb')
1988 fp = open(patchpath, 'rb')
1989 try:
1989 try:
1990 changed = set()
1990 changed = set()
1991 for state, values in iterhunks(fp):
1991 for state, values in iterhunks(fp):
1992 if state == 'file':
1992 if state == 'file':
1993 afile, bfile, first_hunk, gp = values
1993 afile, bfile, first_hunk, gp = values
1994 if gp:
1994 if gp:
1995 gp.path = pathtransform(gp.path, strip - 1, '')[1]
1995 gp.path = pathtransform(gp.path, strip - 1, '')[1]
1996 if gp.oldpath:
1996 if gp.oldpath:
1997 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
1997 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
1998 else:
1998 else:
1999 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1999 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2000 '')
2000 '')
2001 changed.add(gp.path)
2001 changed.add(gp.path)
2002 if gp.op == 'RENAME':
2002 if gp.op == 'RENAME':
2003 changed.add(gp.oldpath)
2003 changed.add(gp.oldpath)
2004 elif state not in ('hunk', 'git'):
2004 elif state not in ('hunk', 'git'):
2005 raise util.Abort(_('unsupported parser state: %s') % state)
2005 raise util.Abort(_('unsupported parser state: %s') % state)
2006 return changed
2006 return changed
2007 finally:
2007 finally:
2008 fp.close()
2008 fp.close()
2009
2009
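A rough sketch of using changedfiles() above to preview which paths a patch would touch without applying it; the patch path is a made-up example, and ui/repo are assumed as in the earlier sketch:

    # Hedged sketch: collect the set of paths named by a patch.
    for path in sorted(changedfiles(ui, repo, '/tmp/fix.patch', strip=1)):
        ui.write('%s\n' % path)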
2010 class GitDiffRequired(Exception):
2010 class GitDiffRequired(Exception):
2011 pass
2011 pass
2012
2012
2013 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2013 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2014 '''return diffopts with all features supported and parsed'''
2014 '''return diffopts with all features supported and parsed'''
2015 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2015 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2016 git=True, whitespace=True, formatchanging=True)
2016 git=True, whitespace=True, formatchanging=True)
2017
2017
2018 diffopts = diffallopts
2018 diffopts = diffallopts
2019
2019
2020 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2020 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2021 whitespace=False, formatchanging=False):
2021 whitespace=False, formatchanging=False):
2022 '''return diffopts with only opted-in features parsed
2022 '''return diffopts with only opted-in features parsed
2023
2023
2024 Features:
2024 Features:
2025 - git: git-style diffs
2025 - git: git-style diffs
2026 - whitespace: whitespace options like ignoreblanklines and ignorews
2026 - whitespace: whitespace options like ignoreblanklines and ignorews
2027 - formatchanging: options that will likely break or cause correctness issues
2027 - formatchanging: options that will likely break or cause correctness issues
2028 with most diff parsers
2028 with most diff parsers
2029 '''
2029 '''
2030 def get(key, name=None, getter=ui.configbool, forceplain=None):
2030 def get(key, name=None, getter=ui.configbool, forceplain=None):
2031 if opts:
2031 if opts:
2032 v = opts.get(key)
2032 v = opts.get(key)
2033 if v:
2033 if v:
2034 return v
2034 return v
2035 if forceplain is not None and ui.plain():
2035 if forceplain is not None and ui.plain():
2036 return forceplain
2036 return forceplain
2037 return getter(section, name or key, None, untrusted=untrusted)
2037 return getter(section, name or key, None, untrusted=untrusted)
2038
2038
2039 # core options, expected to be understood by every diff parser
2039 # core options, expected to be understood by every diff parser
2040 buildopts = {
2040 buildopts = {
2041 'nodates': get('nodates'),
2041 'nodates': get('nodates'),
2042 'showfunc': get('show_function', 'showfunc'),
2042 'showfunc': get('show_function', 'showfunc'),
2043 'context': get('unified', getter=ui.config),
2043 'context': get('unified', getter=ui.config),
2044 }
2044 }
2045
2045
2046 if git:
2046 if git:
2047 buildopts['git'] = get('git')
2047 buildopts['git'] = get('git')
2048 if whitespace:
2048 if whitespace:
2049 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2049 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2050 buildopts['ignorewsamount'] = get('ignore_space_change',
2050 buildopts['ignorewsamount'] = get('ignore_space_change',
2051 'ignorewsamount')
2051 'ignorewsamount')
2052 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2052 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2053 'ignoreblanklines')
2053 'ignoreblanklines')
2054 if formatchanging:
2054 if formatchanging:
2055 buildopts['text'] = opts and opts.get('text')
2055 buildopts['text'] = opts and opts.get('text')
2056 buildopts['nobinary'] = get('nobinary')
2056 buildopts['nobinary'] = get('nobinary')
2057 buildopts['noprefix'] = get('noprefix', forceplain=False)
2057 buildopts['noprefix'] = get('noprefix', forceplain=False)
2058
2058
2059 return mdiff.diffopts(**buildopts)
2059 return mdiff.diffopts(**buildopts)
2060
2060
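To make the difference between the two helpers above concrete, a small hedged sketch; the opts dictionaries mimic parsed command-line flags and are made up:

    # diffallopts() parses every feature group; difffeatureopts() parses only
    # the groups explicitly opted in, so unrequested options fall back to
    # their defaults.
    full = diffallopts(ui, {'git': True, 'unified': '5'})
    lean = difffeatureopts(ui, {'git': True, 'ignore_all_space': True}, git=True)
    # 'ignore_all_space' is not honored in 'lean' because whitespace=True was
    # not requested; both calls return mdiff.diffopts instances.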
2061 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2061 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2062 losedatafn=None, prefix=''):
2062 losedatafn=None, prefix=''):
2063 '''yields diff of changes to files between two nodes, or node and
2063 '''yields diff of changes to files between two nodes, or node and
2064 working directory.
2064 working directory.
2065
2065
2066 if node1 is None, use first dirstate parent instead.
2066 if node1 is None, use first dirstate parent instead.
2067 if node2 is None, compare node1 with working directory.
2067 if node2 is None, compare node1 with working directory.
2068
2068
2069 losedatafn(**kwargs) is a callable run when opts.upgrade=True and
2069 losedatafn(**kwargs) is a callable run when opts.upgrade=True and
2070 every time some change cannot be represented with the current
2070 every time some change cannot be represented with the current
2071 patch format. Return False to upgrade to git patch format, True to
2071 patch format. Return False to upgrade to git patch format, True to
2072 accept the loss or raise an exception to abort the diff. It is
2072 accept the loss or raise an exception to abort the diff. It is
2073 called with the name of the current file being diffed as 'fn'. If set
2073 called with the name of the current file being diffed as 'fn'. If set
2074 to None, patches will always be upgraded to git format when
2074 to None, patches will always be upgraded to git format when
2075 necessary.
2075 necessary.
2076
2076
2077 prefix is a filename prefix that is prepended to all filenames on
2077 prefix is a filename prefix that is prepended to all filenames on
2078 display (used for subrepos).
2078 display (used for subrepos).
2079 '''
2079 '''
2080
2080
2081 if opts is None:
2081 if opts is None:
2082 opts = mdiff.defaultopts
2082 opts = mdiff.defaultopts
2083
2083
2084 if not node1 and not node2:
2084 if not node1 and not node2:
2085 node1 = repo.dirstate.p1()
2085 node1 = repo.dirstate.p1()
2086
2086
2087 def lrugetfilectx():
2087 def lrugetfilectx():
2088 cache = {}
2088 cache = {}
2089 order = util.deque()
2089 order = util.deque()
2090 def getfilectx(f, ctx):
2090 def getfilectx(f, ctx):
2091 fctx = ctx.filectx(f, filelog=cache.get(f))
2091 fctx = ctx.filectx(f, filelog=cache.get(f))
2092 if f not in cache:
2092 if f not in cache:
2093 if len(cache) > 20:
2093 if len(cache) > 20:
2094 del cache[order.popleft()]
2094 del cache[order.popleft()]
2095 cache[f] = fctx.filelog()
2095 cache[f] = fctx.filelog()
2096 else:
2096 else:
2097 order.remove(f)
2097 order.remove(f)
2098 order.append(f)
2098 order.append(f)
2099 return fctx
2099 return fctx
2100 return getfilectx
2100 return getfilectx
2101 getfilectx = lrugetfilectx()
2101 getfilectx = lrugetfilectx()
2102
2102
2103 ctx1 = repo[node1]
2103 ctx1 = repo[node1]
2104 ctx2 = repo[node2]
2104 ctx2 = repo[node2]
2105
2105
2106 if not changes:
2106 if not changes:
2107 changes = repo.status(ctx1, ctx2, match=match)
2107 changes = repo.status(ctx1, ctx2, match=match)
2108 modified, added, removed = changes[:3]
2108 modified, added, removed = changes[:3]
2109
2109
2110 if not modified and not added and not removed:
2110 if not modified and not added and not removed:
2111 return []
2111 return []
2112
2112
2113 if repo.ui.debugflag:
2113 if repo.ui.debugflag:
2114 hexfunc = hex
2114 hexfunc = hex
2115 else:
2115 else:
2116 hexfunc = short
2116 hexfunc = short
2117 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2117 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2118
2118
2119 copy = {}
2119 copy = {}
2120 if opts.git or opts.upgrade:
2120 if opts.git or opts.upgrade:
2121 copy = copies.pathcopies(ctx1, ctx2)
2121 copy = copies.pathcopies(ctx1, ctx2)
2122
2122
2123 def difffn(opts, losedata):
2123 def difffn(opts, losedata):
2124 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2124 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2125 copy, getfilectx, opts, losedata, prefix)
2125 copy, getfilectx, opts, losedata, prefix, '')
2126 if opts.upgrade and not opts.git:
2126 if opts.upgrade and not opts.git:
2127 try:
2127 try:
2128 def losedata(fn):
2128 def losedata(fn):
2129 if not losedatafn or not losedatafn(fn=fn):
2129 if not losedatafn or not losedatafn(fn=fn):
2130 raise GitDiffRequired
2130 raise GitDiffRequired
2131 # Buffer the whole output until we are sure it can be generated
2131 # Buffer the whole output until we are sure it can be generated
2132 return list(difffn(opts.copy(git=False), losedata))
2132 return list(difffn(opts.copy(git=False), losedata))
2133 except GitDiffRequired:
2133 except GitDiffRequired:
2134 return difffn(opts.copy(git=True), None)
2134 return difffn(opts.copy(git=True), None)
2135 else:
2135 else:
2136 return difffn(opts, None)
2136 return difffn(opts, None)
2137
2137
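A hedged sketch of driving diff() above; 'repo' is an assumed localrepository object, and omitting node1/node2 compares the working directory against its first parent as described in the docstring:

    # Plain diff of the working directory against its first parent.
    for chunk in diff(repo):
        repo.ui.write(chunk)

    # Same comparison, but forcing git-style output via explicit diffopts.
    for chunk in diff(repo, opts=diffallopts(repo.ui, {'git': True})):
        repo.ui.write(chunk)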
2138 def difflabel(func, *args, **kw):
2138 def difflabel(func, *args, **kw):
2139 '''yields 2-tuples of (output, label) based on the output of func()'''
2139 '''yields 2-tuples of (output, label) based on the output of func()'''
2140 headprefixes = [('diff', 'diff.diffline'),
2140 headprefixes = [('diff', 'diff.diffline'),
2141 ('copy', 'diff.extended'),
2141 ('copy', 'diff.extended'),
2142 ('rename', 'diff.extended'),
2142 ('rename', 'diff.extended'),
2143 ('old', 'diff.extended'),
2143 ('old', 'diff.extended'),
2144 ('new', 'diff.extended'),
2144 ('new', 'diff.extended'),
2145 ('deleted', 'diff.extended'),
2145 ('deleted', 'diff.extended'),
2146 ('---', 'diff.file_a'),
2146 ('---', 'diff.file_a'),
2147 ('+++', 'diff.file_b')]
2147 ('+++', 'diff.file_b')]
2148 textprefixes = [('@', 'diff.hunk'),
2148 textprefixes = [('@', 'diff.hunk'),
2149 ('-', 'diff.deleted'),
2149 ('-', 'diff.deleted'),
2150 ('+', 'diff.inserted')]
2150 ('+', 'diff.inserted')]
2151 head = False
2151 head = False
2152 for chunk in func(*args, **kw):
2152 for chunk in func(*args, **kw):
2153 lines = chunk.split('\n')
2153 lines = chunk.split('\n')
2154 for i, line in enumerate(lines):
2154 for i, line in enumerate(lines):
2155 if i != 0:
2155 if i != 0:
2156 yield ('\n', '')
2156 yield ('\n', '')
2157 if head:
2157 if head:
2158 if line.startswith('@'):
2158 if line.startswith('@'):
2159 head = False
2159 head = False
2160 else:
2160 else:
2161 if line and line[0] not in ' +-@\\':
2161 if line and line[0] not in ' +-@\\':
2162 head = True
2162 head = True
2163 stripline = line
2163 stripline = line
2164 diffline = False
2164 diffline = False
2165 if not head and line and line[0] in '+-':
2165 if not head and line and line[0] in '+-':
2166 # highlight tabs and trailing whitespace, but only in
2166 # highlight tabs and trailing whitespace, but only in
2167 # changed lines
2167 # changed lines
2168 stripline = line.rstrip()
2168 stripline = line.rstrip()
2169 diffline = True
2169 diffline = True
2170
2170
2171 prefixes = textprefixes
2171 prefixes = textprefixes
2172 if head:
2172 if head:
2173 prefixes = headprefixes
2173 prefixes = headprefixes
2174 for prefix, label in prefixes:
2174 for prefix, label in prefixes:
2175 if stripline.startswith(prefix):
2175 if stripline.startswith(prefix):
2176 if diffline:
2176 if diffline:
2177 for token in tabsplitter.findall(stripline):
2177 for token in tabsplitter.findall(stripline):
2178 if '\t' == token[0]:
2178 if '\t' == token[0]:
2179 yield (token, 'diff.tab')
2179 yield (token, 'diff.tab')
2180 else:
2180 else:
2181 yield (token, label)
2181 yield (token, label)
2182 else:
2182 else:
2183 yield (stripline, label)
2183 yield (stripline, label)
2184 break
2184 break
2185 else:
2185 else:
2186 yield (line, '')
2186 yield (line, '')
2187 if line != stripline:
2187 if line != stripline:
2188 yield (line[len(stripline):], 'diff.trailingwhitespace')
2188 yield (line[len(stripline):], 'diff.trailingwhitespace')
2189
2189
2190 def diffui(*args, **kw):
2190 def diffui(*args, **kw):
2191 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2191 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2192 return difflabel(diff, *args, **kw)
2192 return difflabel(diff, *args, **kw)
2193
2193
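A short hedged sketch of the labeled variant: diffui() wraps diff() through difflabel(), so each yielded pair can be passed straight to ui.write() for colorization ('repo' assumed as before):

    # Labeled diff output, suitable for a color-aware ui.
    for output, label in diffui(repo, opts=diffallopts(repo.ui, {'git': True})):
        repo.ui.write(output, label=label)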
2194 def _filepairs(ctx1, modified, added, removed, copy, opts):
2194 def _filepairs(ctx1, modified, added, removed, copy, opts):
2195 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2195 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2196 before and f2 is the name after. For added files, f1 will be None,
2196 before and f2 is the name after. For added files, f1 will be None,
2197 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2197 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2198 or 'rename' (the latter two only if opts.git is set).'''
2198 or 'rename' (the latter two only if opts.git is set).'''
2199 gone = set()
2199 gone = set()
2200
2200
2201 copyto = dict([(v, k) for k, v in copy.items()])
2201 copyto = dict([(v, k) for k, v in copy.items()])
2202
2202
2203 addedset, removedset = set(added), set(removed)
2203 addedset, removedset = set(added), set(removed)
2204 # Fix up added, since merged-in additions appear as
2204 # Fix up added, since merged-in additions appear as
2205 # modifications during merges
2205 # modifications during merges
2206 for f in modified:
2206 for f in modified:
2207 if f not in ctx1:
2207 if f not in ctx1:
2208 addedset.add(f)
2208 addedset.add(f)
2209
2209
2210 for f in sorted(modified + added + removed):
2210 for f in sorted(modified + added + removed):
2211 copyop = None
2211 copyop = None
2212 f1, f2 = f, f
2212 f1, f2 = f, f
2213 if f in addedset:
2213 if f in addedset:
2214 f1 = None
2214 f1 = None
2215 if f in copy:
2215 if f in copy:
2216 if opts.git:
2216 if opts.git:
2217 f1 = copy[f]
2217 f1 = copy[f]
2218 if f1 in removedset and f1 not in gone:
2218 if f1 in removedset and f1 not in gone:
2219 copyop = 'rename'
2219 copyop = 'rename'
2220 gone.add(f1)
2220 gone.add(f1)
2221 else:
2221 else:
2222 copyop = 'copy'
2222 copyop = 'copy'
2223 elif f in removedset:
2223 elif f in removedset:
2224 f2 = None
2224 f2 = None
2225 if opts.git:
2225 if opts.git:
2226 # have we already reported a copy above?
2226 # have we already reported a copy above?
2227 if (f in copyto and copyto[f] in addedset
2227 if (f in copyto and copyto[f] in addedset
2228 and copy[copyto[f]] == f):
2228 and copy[copyto[f]] == f):
2229 continue
2229 continue
2230 yield f1, f2, copyop
2230 yield f1, f2, copyop
2231
2231
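To make the pairing rules concrete, a hedged, self-contained illustration; fakectx and gitopts are throwaway stand-ins (not Mercurial APIs) that provide only what _filepairs() actually touches:

    # Illustration: a rename is reported once, and the matching removal is
    # suppressed on the second pass.
    class gitopts(object):
        git = True                               # request git-style pairing

    class fakectx(object):
        def __contains__(self, f):               # pretend 'a' and 'old' pre-exist
            return f in ('a', 'old')

    pairs = list(_filepairs(fakectx(), ['a'], ['b'], ['old'],
                            {'b': 'old'}, gitopts()))
    # pairs == [('a', 'a', None), ('old', 'b', 'rename')]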
2232 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2232 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2233 copy, getfilectx, opts, losedatafn, prefix):
2233 copy, getfilectx, opts, losedatafn, prefix, relroot):
2234 '''given input data, generate a diff and yield it in blocks
2234 '''given input data, generate a diff and yield it in blocks
2235
2235
2236 If generating a diff would lose data like flags or binary data and
2236 If generating a diff would lose data like flags or binary data and
2237 losedatafn is not None, it will be called.
2237 losedatafn is not None, it will be called.
2238
2238
2239 prefix is added to every path in the diff output.'''
2239 relroot is removed and prefix is added to every path in the diff output.
2240
2241 If relroot is not empty, this function expects every path in modified,
2242 added, removed and copy to start with it.'''
2240
2243
2241 def gitindex(text):
2244 def gitindex(text):
2242 if not text:
2245 if not text:
2243 text = ""
2246 text = ""
2244 l = len(text)
2247 l = len(text)
2245 s = util.sha1('blob %d\0' % l)
2248 s = util.sha1('blob %d\0' % l)
2246 s.update(text)
2249 s.update(text)
2247 return s.hexdigest()
2250 return s.hexdigest()
2248
2251
2249 if opts.noprefix:
2252 if opts.noprefix:
2250 aprefix = bprefix = ''
2253 aprefix = bprefix = ''
2251 else:
2254 else:
2252 aprefix = 'a/'
2255 aprefix = 'a/'
2253 bprefix = 'b/'
2256 bprefix = 'b/'
2254
2257
2255 def diffline(f, revs):
2258 def diffline(f, revs):
2256 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2259 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2257 return 'diff %s %s' % (revinfo, f)
2260 return 'diff %s %s' % (revinfo, f)
2258
2261
2259 date1 = util.datestr(ctx1.date())
2262 date1 = util.datestr(ctx1.date())
2260 date2 = util.datestr(ctx2.date())
2263 date2 = util.datestr(ctx2.date())
2261
2264
2262 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2265 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2263
2266
2267 if relroot != '' and (repo.ui.configbool('devel', 'all')
2268 or repo.ui.configbool('devel', 'check-relroot')):
2269 for f in modified + added + removed + copy.keys() + copy.values():
2270 if f is not None and not f.startswith(relroot):
2271 raise AssertionError(
2272 "file %s doesn't start with relroot %s" % (f, relroot))
2273
2264 for f1, f2, copyop in _filepairs(
2274 for f1, f2, copyop in _filepairs(
2265 ctx1, modified, added, removed, copy, opts):
2275 ctx1, modified, added, removed, copy, opts):
2266 content1 = None
2276 content1 = None
2267 content2 = None
2277 content2 = None
2268 flag1 = None
2278 flag1 = None
2269 flag2 = None
2279 flag2 = None
2270 if f1:
2280 if f1:
2271 content1 = getfilectx(f1, ctx1).data()
2281 content1 = getfilectx(f1, ctx1).data()
2272 if opts.git or losedatafn:
2282 if opts.git or losedatafn:
2273 flag1 = ctx1.flags(f1)
2283 flag1 = ctx1.flags(f1)
2274 if f2:
2284 if f2:
2275 content2 = getfilectx(f2, ctx2).data()
2285 content2 = getfilectx(f2, ctx2).data()
2276 if opts.git or losedatafn:
2286 if opts.git or losedatafn:
2277 flag2 = ctx2.flags(f2)
2287 flag2 = ctx2.flags(f2)
2278 binary = False
2288 binary = False
2279 if opts.git or losedatafn:
2289 if opts.git or losedatafn:
2280 binary = util.binary(content1) or util.binary(content2)
2290 binary = util.binary(content1) or util.binary(content2)
2281
2291
2282 if losedatafn and not opts.git:
2292 if losedatafn and not opts.git:
2283 if (binary or
2293 if (binary or
2284 # copy/rename
2294 # copy/rename
2285 f2 in copy or
2295 f2 in copy or
2286 # empty file creation
2296 # empty file creation
2287 (not f1 and not content2) or
2297 (not f1 and not content2) or
2288 # empty file deletion
2298 # empty file deletion
2289 (not content1 and not f2) or
2299 (not content1 and not f2) or
2290 # create with flags
2300 # create with flags
2291 (not f1 and flag2) or
2301 (not f1 and flag2) or
2292 # change flags
2302 # change flags
2293 (f1 and f2 and flag1 != flag2)):
2303 (f1 and f2 and flag1 != flag2)):
2294 losedatafn(f2 or f1)
2304 losedatafn(f2 or f1)
2295
2305
2296 path1 = posixpath.join(prefix, f1 or f2)
2306 path1 = f1 or f2
2297 path2 = posixpath.join(prefix, f2 or f1)
2307 path2 = f2 or f1
2308 path1 = posixpath.join(prefix, path1[len(relroot):])
2309 path2 = posixpath.join(prefix, path2[len(relroot):])
2298 header = []
2310 header = []
2299 if opts.git:
2311 if opts.git:
2300 header.append('diff --git %s%s %s%s' %
2312 header.append('diff --git %s%s %s%s' %
2301 (aprefix, path1, bprefix, path2))
2313 (aprefix, path1, bprefix, path2))
2302 if not f1: # added
2314 if not f1: # added
2303 header.append('new file mode %s' % gitmode[flag2])
2315 header.append('new file mode %s' % gitmode[flag2])
2304 elif not f2: # removed
2316 elif not f2: # removed
2305 header.append('deleted file mode %s' % gitmode[flag1])
2317 header.append('deleted file mode %s' % gitmode[flag1])
2306 else: # modified/copied/renamed
2318 else: # modified/copied/renamed
2307 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2319 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2308 if mode1 != mode2:
2320 if mode1 != mode2:
2309 header.append('old mode %s' % mode1)
2321 header.append('old mode %s' % mode1)
2310 header.append('new mode %s' % mode2)
2322 header.append('new mode %s' % mode2)
2311 if copyop is not None:
2323 if copyop is not None:
2312 header.append('%s from %s' % (copyop, path1))
2324 header.append('%s from %s' % (copyop, path1))
2313 header.append('%s to %s' % (copyop, path2))
2325 header.append('%s to %s' % (copyop, path2))
2314 elif revs and not repo.ui.quiet:
2326 elif revs and not repo.ui.quiet:
2315 header.append(diffline(path1, revs))
2327 header.append(diffline(path1, revs))
2316
2328
2317 if binary and opts.git and not opts.nobinary:
2329 if binary and opts.git and not opts.nobinary:
2318 text = mdiff.b85diff(content1, content2)
2330 text = mdiff.b85diff(content1, content2)
2319 if text:
2331 if text:
2320 header.append('index %s..%s' %
2332 header.append('index %s..%s' %
2321 (gitindex(content1), gitindex(content2)))
2333 (gitindex(content1), gitindex(content2)))
2322 else:
2334 else:
2323 text = mdiff.unidiff(content1, date1,
2335 text = mdiff.unidiff(content1, date1,
2324 content2, date2,
2336 content2, date2,
2325 path1, path2, opts=opts)
2337 path1, path2, opts=opts)
2326 if header and (text or len(header) > 1):
2338 if header and (text or len(header) > 1):
2327 yield '\n'.join(header) + '\n'
2339 yield '\n'.join(header) + '\n'
2328 if text:
2340 if text:
2329 yield text
2341 yield text
2330
2342
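The relroot support added above reduces to a single string operation per path; a hedged standalone illustration with made-up values:

    # relroot is stripped from the front of each path, then prefix is joined on.
    import posixpath
    relroot, prefix = 'sub/', 'out/'
    path = 'sub/dir/file.txt'                    # expected to start with relroot
    print(posixpath.join(prefix, path[len(relroot):]))   # -> out/dir/file.txt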
2331 def diffstatsum(stats):
2343 def diffstatsum(stats):
2332 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2344 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2333 for f, a, r, b in stats:
2345 for f, a, r, b in stats:
2334 maxfile = max(maxfile, encoding.colwidth(f))
2346 maxfile = max(maxfile, encoding.colwidth(f))
2335 maxtotal = max(maxtotal, a + r)
2347 maxtotal = max(maxtotal, a + r)
2336 addtotal += a
2348 addtotal += a
2337 removetotal += r
2349 removetotal += r
2338 binary = binary or b
2350 binary = binary or b
2339
2351
2340 return maxfile, maxtotal, addtotal, removetotal, binary
2352 return maxfile, maxtotal, addtotal, removetotal, binary
2341
2353
2342 def diffstatdata(lines):
2354 def diffstatdata(lines):
2343 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2355 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2344
2356
2345 results = []
2357 results = []
2346 filename, adds, removes, isbinary = None, 0, 0, False
2358 filename, adds, removes, isbinary = None, 0, 0, False
2347
2359
2348 def addresult():
2360 def addresult():
2349 if filename:
2361 if filename:
2350 results.append((filename, adds, removes, isbinary))
2362 results.append((filename, adds, removes, isbinary))
2351
2363
2352 for line in lines:
2364 for line in lines:
2353 if line.startswith('diff'):
2365 if line.startswith('diff'):
2354 addresult()
2366 addresult()
2355 # set numbers to 0 anyway when starting new file
2367 # set numbers to 0 anyway when starting new file
2356 adds, removes, isbinary = 0, 0, False
2368 adds, removes, isbinary = 0, 0, False
2357 if line.startswith('diff --git a/'):
2369 if line.startswith('diff --git a/'):
2358 filename = gitre.search(line).group(2)
2370 filename = gitre.search(line).group(2)
2359 elif line.startswith('diff -r'):
2371 elif line.startswith('diff -r'):
2360 # format: "diff -r ... -r ... filename"
2372 # format: "diff -r ... -r ... filename"
2361 filename = diffre.search(line).group(1)
2373 filename = diffre.search(line).group(1)
2362 elif line.startswith('+') and not line.startswith('+++ '):
2374 elif line.startswith('+') and not line.startswith('+++ '):
2363 adds += 1
2375 adds += 1
2364 elif line.startswith('-') and not line.startswith('--- '):
2376 elif line.startswith('-') and not line.startswith('--- '):
2365 removes += 1
2377 removes += 1
2366 elif (line.startswith('GIT binary patch') or
2378 elif (line.startswith('GIT binary patch') or
2367 line.startswith('Binary file')):
2379 line.startswith('Binary file')):
2368 isbinary = True
2380 isbinary = True
2369 addresult()
2381 addresult()
2370 return results
2382 return results
2371
2383
2372 def diffstat(lines, width=80, git=False):
2384 def diffstat(lines, width=80, git=False):
2373 output = []
2385 output = []
2374 stats = diffstatdata(lines)
2386 stats = diffstatdata(lines)
2375 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2387 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2376
2388
2377 countwidth = len(str(maxtotal))
2389 countwidth = len(str(maxtotal))
2378 if hasbinary and countwidth < 3:
2390 if hasbinary and countwidth < 3:
2379 countwidth = 3
2391 countwidth = 3
2380 graphwidth = width - countwidth - maxname - 6
2392 graphwidth = width - countwidth - maxname - 6
2381 if graphwidth < 10:
2393 if graphwidth < 10:
2382 graphwidth = 10
2394 graphwidth = 10
2383
2395
2384 def scale(i):
2396 def scale(i):
2385 if maxtotal <= graphwidth:
2397 if maxtotal <= graphwidth:
2386 return i
2398 return i
2387 # If diffstat runs out of room it doesn't print anything,
2399 # If diffstat runs out of room it doesn't print anything,
2388 # which isn't very useful, so always print at least one + or -
2400 # which isn't very useful, so always print at least one + or -
2389 # if there were at least some changes.
2401 # if there were at least some changes.
2390 return max(i * graphwidth // maxtotal, int(bool(i)))
2402 return max(i * graphwidth // maxtotal, int(bool(i)))
2391
2403
2392 for filename, adds, removes, isbinary in stats:
2404 for filename, adds, removes, isbinary in stats:
2393 if isbinary:
2405 if isbinary:
2394 count = 'Bin'
2406 count = 'Bin'
2395 else:
2407 else:
2396 count = adds + removes
2408 count = adds + removes
2397 pluses = '+' * scale(adds)
2409 pluses = '+' * scale(adds)
2398 minuses = '-' * scale(removes)
2410 minuses = '-' * scale(removes)
2399 output.append(' %s%s | %*s %s%s\n' %
2411 output.append(' %s%s | %*s %s%s\n' %
2400 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2412 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2401 countwidth, count, pluses, minuses))
2413 countwidth, count, pluses, minuses))
2402
2414
2403 if stats:
2415 if stats:
2404 output.append(_(' %d files changed, %d insertions(+), '
2416 output.append(_(' %d files changed, %d insertions(+), '
2405 '%d deletions(-)\n')
2417 '%d deletions(-)\n')
2406 % (len(stats), totaladds, totalremoves))
2418 % (len(stats), totaladds, totalremoves))
2407
2419
2408 return ''.join(output)
2420 return ''.join(output)
2409
2421
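A hedged sketch tying diff() and diffstat() together; 'repo' is assumed as before and the width is arbitrary:

    # Summarize a working-directory diff as a diffstat table.
    chunks = diff(repo, opts=diffallopts(repo.ui, {'git': True}))
    lines = ''.join(chunks).splitlines(True)     # keep newlines for the parser
    repo.ui.write(diffstat(lines, width=72))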
2410 def diffstatui(*args, **kw):
2422 def diffstatui(*args, **kw):
2411 '''like diffstat(), but yields 2-tuples of (output, label) for
2423 '''like diffstat(), but yields 2-tuples of (output, label) for
2412 ui.write()
2424 ui.write()
2413 '''
2425 '''
2414
2426
2415 for line in diffstat(*args, **kw).splitlines():
2427 for line in diffstat(*args, **kw).splitlines():
2416 if line and line[-1] in '+-':
2428 if line and line[-1] in '+-':
2417 name, graph = line.rsplit(' ', 1)
2429 name, graph = line.rsplit(' ', 1)
2418 yield (name + ' ', '')
2430 yield (name + ' ', '')
2419 m = re.search(r'\++', graph)
2431 m = re.search(r'\++', graph)
2420 if m:
2432 if m:
2421 yield (m.group(0), 'diffstat.inserted')
2433 yield (m.group(0), 'diffstat.inserted')
2422 m = re.search(r'-+', graph)
2434 m = re.search(r'-+', graph)
2423 if m:
2435 if m:
2424 yield (m.group(0), 'diffstat.deleted')
2436 yield (m.group(0), 'diffstat.deleted')
2425 else:
2437 else:
2426 yield (line, '')
2438 yield (line, '')
2427 yield ('\n', '')
2439 yield ('\n', '')
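And the labeled counterpart, again as a hedged sketch reusing 'lines' from the previous example:

    # Labeled diffstat output for a color-aware ui.
    for output, label in diffstatui(lines, width=72):
        repo.ui.write(output, label=label)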