record: add comparison methods for recordhunk class
Laurent Charignon
r24346:31edcea5 default
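
This revision gives recordhunk value-based equality and hashing: two hunks compare equal when their body lines, enclosing context (proc), starting line, and header files all match, and __hash__ is built from those same fields so that equal hunks hash identically and can be collected in sets or used as dictionary keys. The following is a minimal standalone sketch of that pattern; the Hunk class and its simplified fields are illustrative stand-ins, not Mercurial's API beyond what the diff below shows.

# Illustrative sketch (not Mercurial code): the __eq__/__hash__ contract the
# diff adds to recordhunk, with simplified stand-in field names.

class Hunk(object):
    def __init__(self, files, fromline, proc, lines):
        self.files = tuple(files)    # files named in the hunk's header
        self.fromline = fromline     # starting line in the source file
        self.proc = proc             # enclosing function/context, may be ''
        self.lines = tuple(lines)    # the '+'/'-'/' ' hunk body lines

    def __eq__(self, other):
        if not isinstance(other, Hunk):
            return False
        return (self.lines == other.lines and
                self.proc == other.proc and
                self.fromline == other.fromline and
                self.files == other.files)

    def __hash__(self):
        # hash only immutable views of the fields used by __eq__, so that
        # a == b implies hash(a) == hash(b)
        return hash((self.lines, self.files, self.fromline, self.proc))

a = Hunk(['patch.py'], 42, 'def apply', ('-old line\n', '+new line\n'))
b = Hunk(['patch.py'], 42, 'def apply', ('-old line\n', '+new line\n'))
assert a == b and hash(a) == hash(b)
assert len({a, b}) == 1   # equal hunks collapse in a set
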
@@ -1,2401 +1,2416 @@
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email, os, errno, re, posixpath, copy
9 import cStringIO, email, os, errno, re, posixpath, copy
10 import tempfile, zlib, shutil
10 import tempfile, zlib, shutil
11 # On python2.4 you have to import these by name or they fail to
11 # On python2.4 you have to import these by name or they fail to
12 # load. This was not a problem on Python 2.7.
12 # load. This was not a problem on Python 2.7.
13 import email.Generator
13 import email.Generator
14 import email.Parser
14 import email.Parser
15
15
16 from i18n import _
16 from i18n import _
17 from node import hex, short
17 from node import hex, short
18 import cStringIO
18 import cStringIO
19 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
19 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
20
20
21 gitre = re.compile('diff --git a/(.*) b/(.*)')
21 gitre = re.compile('diff --git a/(.*) b/(.*)')
22 tabsplitter = re.compile(r'(\t+|[^\t]+)')
22 tabsplitter = re.compile(r'(\t+|[^\t]+)')
23
23
24 class PatchError(Exception):
24 class PatchError(Exception):
25 pass
25 pass
26
26
27
27
28 # public functions
28 # public functions
29
29
30 def split(stream):
30 def split(stream):
31 '''return an iterator of individual patches from a stream'''
31 '''return an iterator of individual patches from a stream'''
32 def isheader(line, inheader):
32 def isheader(line, inheader):
33 if inheader and line[0] in (' ', '\t'):
33 if inheader and line[0] in (' ', '\t'):
34 # continuation
34 # continuation
35 return True
35 return True
36 if line[0] in (' ', '-', '+'):
36 if line[0] in (' ', '-', '+'):
37 # diff line - don't check for header pattern in there
37 # diff line - don't check for header pattern in there
38 return False
38 return False
39 l = line.split(': ', 1)
39 l = line.split(': ', 1)
40 return len(l) == 2 and ' ' not in l[0]
40 return len(l) == 2 and ' ' not in l[0]
41
41
42 def chunk(lines):
42 def chunk(lines):
43 return cStringIO.StringIO(''.join(lines))
43 return cStringIO.StringIO(''.join(lines))
44
44
45 def hgsplit(stream, cur):
45 def hgsplit(stream, cur):
46 inheader = True
46 inheader = True
47
47
48 for line in stream:
48 for line in stream:
49 if not line.strip():
49 if not line.strip():
50 inheader = False
50 inheader = False
51 if not inheader and line.startswith('# HG changeset patch'):
51 if not inheader and line.startswith('# HG changeset patch'):
52 yield chunk(cur)
52 yield chunk(cur)
53 cur = []
53 cur = []
54 inheader = True
54 inheader = True
55
55
56 cur.append(line)
56 cur.append(line)
57
57
58 if cur:
58 if cur:
59 yield chunk(cur)
59 yield chunk(cur)
60
60
61 def mboxsplit(stream, cur):
61 def mboxsplit(stream, cur):
62 for line in stream:
62 for line in stream:
63 if line.startswith('From '):
63 if line.startswith('From '):
64 for c in split(chunk(cur[1:])):
64 for c in split(chunk(cur[1:])):
65 yield c
65 yield c
66 cur = []
66 cur = []
67
67
68 cur.append(line)
68 cur.append(line)
69
69
70 if cur:
70 if cur:
71 for c in split(chunk(cur[1:])):
71 for c in split(chunk(cur[1:])):
72 yield c
72 yield c
73
73
74 def mimesplit(stream, cur):
74 def mimesplit(stream, cur):
75 def msgfp(m):
75 def msgfp(m):
76 fp = cStringIO.StringIO()
76 fp = cStringIO.StringIO()
77 g = email.Generator.Generator(fp, mangle_from_=False)
77 g = email.Generator.Generator(fp, mangle_from_=False)
78 g.flatten(m)
78 g.flatten(m)
79 fp.seek(0)
79 fp.seek(0)
80 return fp
80 return fp
81
81
82 for line in stream:
82 for line in stream:
83 cur.append(line)
83 cur.append(line)
84 c = chunk(cur)
84 c = chunk(cur)
85
85
86 m = email.Parser.Parser().parse(c)
86 m = email.Parser.Parser().parse(c)
87 if not m.is_multipart():
87 if not m.is_multipart():
88 yield msgfp(m)
88 yield msgfp(m)
89 else:
89 else:
90 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
90 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
91 for part in m.walk():
91 for part in m.walk():
92 ct = part.get_content_type()
92 ct = part.get_content_type()
93 if ct not in ok_types:
93 if ct not in ok_types:
94 continue
94 continue
95 yield msgfp(part)
95 yield msgfp(part)
96
96
97 def headersplit(stream, cur):
97 def headersplit(stream, cur):
98 inheader = False
98 inheader = False
99
99
100 for line in stream:
100 for line in stream:
101 if not inheader and isheader(line, inheader):
101 if not inheader and isheader(line, inheader):
102 yield chunk(cur)
102 yield chunk(cur)
103 cur = []
103 cur = []
104 inheader = True
104 inheader = True
105 if inheader and not isheader(line, inheader):
105 if inheader and not isheader(line, inheader):
106 inheader = False
106 inheader = False
107
107
108 cur.append(line)
108 cur.append(line)
109
109
110 if cur:
110 if cur:
111 yield chunk(cur)
111 yield chunk(cur)
112
112
113 def remainder(cur):
113 def remainder(cur):
114 yield chunk(cur)
114 yield chunk(cur)
115
115
116 class fiter(object):
116 class fiter(object):
117 def __init__(self, fp):
117 def __init__(self, fp):
118 self.fp = fp
118 self.fp = fp
119
119
120 def __iter__(self):
120 def __iter__(self):
121 return self
121 return self
122
122
123 def next(self):
123 def next(self):
124 l = self.fp.readline()
124 l = self.fp.readline()
125 if not l:
125 if not l:
126 raise StopIteration
126 raise StopIteration
127 return l
127 return l
128
128
129 inheader = False
129 inheader = False
130 cur = []
130 cur = []
131
131
132 mimeheaders = ['content-type']
132 mimeheaders = ['content-type']
133
133
134 if not util.safehasattr(stream, 'next'):
134 if not util.safehasattr(stream, 'next'):
135 # http responses, for example, have readline but not next
135 # http responses, for example, have readline but not next
136 stream = fiter(stream)
136 stream = fiter(stream)
137
137
138 for line in stream:
138 for line in stream:
139 cur.append(line)
139 cur.append(line)
140 if line.startswith('# HG changeset patch'):
140 if line.startswith('# HG changeset patch'):
141 return hgsplit(stream, cur)
141 return hgsplit(stream, cur)
142 elif line.startswith('From '):
142 elif line.startswith('From '):
143 return mboxsplit(stream, cur)
143 return mboxsplit(stream, cur)
144 elif isheader(line, inheader):
144 elif isheader(line, inheader):
145 inheader = True
145 inheader = True
146 if line.split(':', 1)[0].lower() in mimeheaders:
146 if line.split(':', 1)[0].lower() in mimeheaders:
147 # let email parser handle this
147 # let email parser handle this
148 return mimesplit(stream, cur)
148 return mimesplit(stream, cur)
149 elif line.startswith('--- ') and inheader:
149 elif line.startswith('--- ') and inheader:
150 # No evil headers seen by diff start, split by hand
150 # No evil headers seen by diff start, split by hand
151 return headersplit(stream, cur)
151 return headersplit(stream, cur)
152 # Not enough info, keep reading
152 # Not enough info, keep reading
153
153
154 # if we are here, we have a very plain patch
154 # if we are here, we have a very plain patch
155 return remainder(cur)
155 return remainder(cur)
156
156
157 def extract(ui, fileobj):
157 def extract(ui, fileobj):
158 '''extract patch from data read from fileobj.
158 '''extract patch from data read from fileobj.
159
159
160 patch can be a normal patch or contained in an email message.
160 patch can be a normal patch or contained in an email message.
161
161
162 return tuple (filename, message, user, date, branch, node, p1, p2).
162 return tuple (filename, message, user, date, branch, node, p1, p2).
163 Any item in the returned tuple can be None. If filename is None,
163 Any item in the returned tuple can be None. If filename is None,
164 fileobj did not contain a patch. Caller must unlink filename when done.'''
164 fileobj did not contain a patch. Caller must unlink filename when done.'''
165
165
166 # attempt to detect the start of a patch
166 # attempt to detect the start of a patch
167 # (this heuristic is borrowed from quilt)
167 # (this heuristic is borrowed from quilt)
168 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
168 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
169 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
169 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
170 r'---[ \t].*?^\+\+\+[ \t]|'
170 r'---[ \t].*?^\+\+\+[ \t]|'
171 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
171 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
172
172
173 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
173 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
174 tmpfp = os.fdopen(fd, 'w')
174 tmpfp = os.fdopen(fd, 'w')
175 try:
175 try:
176 msg = email.Parser.Parser().parse(fileobj)
176 msg = email.Parser.Parser().parse(fileobj)
177
177
178 subject = msg['Subject']
178 subject = msg['Subject']
179 user = msg['From']
179 user = msg['From']
180 if not subject and not user:
180 if not subject and not user:
181 # Not an email, restore parsed headers if any
181 # Not an email, restore parsed headers if any
182 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
182 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
183
183
184 # should try to parse msg['Date']
184 # should try to parse msg['Date']
185 date = None
185 date = None
186 nodeid = None
186 nodeid = None
187 branch = None
187 branch = None
188 parents = []
188 parents = []
189
189
190 if subject:
190 if subject:
191 if subject.startswith('[PATCH'):
191 if subject.startswith('[PATCH'):
192 pend = subject.find(']')
192 pend = subject.find(']')
193 if pend >= 0:
193 if pend >= 0:
194 subject = subject[pend + 1:].lstrip()
194 subject = subject[pend + 1:].lstrip()
195 subject = re.sub(r'\n[ \t]+', ' ', subject)
195 subject = re.sub(r'\n[ \t]+', ' ', subject)
196 ui.debug('Subject: %s\n' % subject)
196 ui.debug('Subject: %s\n' % subject)
197 if user:
197 if user:
198 ui.debug('From: %s\n' % user)
198 ui.debug('From: %s\n' % user)
199 diffs_seen = 0
199 diffs_seen = 0
200 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
200 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
201 message = ''
201 message = ''
202 for part in msg.walk():
202 for part in msg.walk():
203 content_type = part.get_content_type()
203 content_type = part.get_content_type()
204 ui.debug('Content-Type: %s\n' % content_type)
204 ui.debug('Content-Type: %s\n' % content_type)
205 if content_type not in ok_types:
205 if content_type not in ok_types:
206 continue
206 continue
207 payload = part.get_payload(decode=True)
207 payload = part.get_payload(decode=True)
208 m = diffre.search(payload)
208 m = diffre.search(payload)
209 if m:
209 if m:
210 hgpatch = False
210 hgpatch = False
211 hgpatchheader = False
211 hgpatchheader = False
212 ignoretext = False
212 ignoretext = False
213
213
214 ui.debug('found patch at byte %d\n' % m.start(0))
214 ui.debug('found patch at byte %d\n' % m.start(0))
215 diffs_seen += 1
215 diffs_seen += 1
216 cfp = cStringIO.StringIO()
216 cfp = cStringIO.StringIO()
217 for line in payload[:m.start(0)].splitlines():
217 for line in payload[:m.start(0)].splitlines():
218 if line.startswith('# HG changeset patch') and not hgpatch:
218 if line.startswith('# HG changeset patch') and not hgpatch:
219 ui.debug('patch generated by hg export\n')
219 ui.debug('patch generated by hg export\n')
220 hgpatch = True
220 hgpatch = True
221 hgpatchheader = True
221 hgpatchheader = True
222 # drop earlier commit message content
222 # drop earlier commit message content
223 cfp.seek(0)
223 cfp.seek(0)
224 cfp.truncate()
224 cfp.truncate()
225 subject = None
225 subject = None
226 elif hgpatchheader:
226 elif hgpatchheader:
227 if line.startswith('# User '):
227 if line.startswith('# User '):
228 user = line[7:]
228 user = line[7:]
229 ui.debug('From: %s\n' % user)
229 ui.debug('From: %s\n' % user)
230 elif line.startswith("# Date "):
230 elif line.startswith("# Date "):
231 date = line[7:]
231 date = line[7:]
232 elif line.startswith("# Branch "):
232 elif line.startswith("# Branch "):
233 branch = line[9:]
233 branch = line[9:]
234 elif line.startswith("# Node ID "):
234 elif line.startswith("# Node ID "):
235 nodeid = line[10:]
235 nodeid = line[10:]
236 elif line.startswith("# Parent "):
236 elif line.startswith("# Parent "):
237 parents.append(line[9:].lstrip())
237 parents.append(line[9:].lstrip())
238 elif not line.startswith("# "):
238 elif not line.startswith("# "):
239 hgpatchheader = False
239 hgpatchheader = False
240 elif line == '---':
240 elif line == '---':
241 ignoretext = True
241 ignoretext = True
242 if not hgpatchheader and not ignoretext:
242 if not hgpatchheader and not ignoretext:
243 cfp.write(line)
243 cfp.write(line)
244 cfp.write('\n')
244 cfp.write('\n')
245 message = cfp.getvalue()
245 message = cfp.getvalue()
246 if tmpfp:
246 if tmpfp:
247 tmpfp.write(payload)
247 tmpfp.write(payload)
248 if not payload.endswith('\n'):
248 if not payload.endswith('\n'):
249 tmpfp.write('\n')
249 tmpfp.write('\n')
250 elif not diffs_seen and message and content_type == 'text/plain':
250 elif not diffs_seen and message and content_type == 'text/plain':
251 message += '\n' + payload
251 message += '\n' + payload
252 except: # re-raises
252 except: # re-raises
253 tmpfp.close()
253 tmpfp.close()
254 os.unlink(tmpname)
254 os.unlink(tmpname)
255 raise
255 raise
256
256
257 if subject and not message.startswith(subject):
257 if subject and not message.startswith(subject):
258 message = '%s\n%s' % (subject, message)
258 message = '%s\n%s' % (subject, message)
259 tmpfp.close()
259 tmpfp.close()
260 if not diffs_seen:
260 if not diffs_seen:
261 os.unlink(tmpname)
261 os.unlink(tmpname)
262 return None, message, user, date, branch, None, None, None
262 return None, message, user, date, branch, None, None, None
263
263
264 if parents:
264 if parents:
265 p1 = parents.pop(0)
265 p1 = parents.pop(0)
266 else:
266 else:
267 p1 = None
267 p1 = None
268
268
269 if parents:
269 if parents:
270 p2 = parents.pop(0)
270 p2 = parents.pop(0)
271 else:
271 else:
272 p2 = None
272 p2 = None
273
273
274 return tmpname, message, user, date, branch, nodeid, p1, p2
274 return tmpname, message, user, date, branch, nodeid, p1, p2
275
275
276 class patchmeta(object):
276 class patchmeta(object):
277 """Patched file metadata
277 """Patched file metadata
278
278
279 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
279 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
280 or COPY. 'path' is patched file path. 'oldpath' is set to the
280 or COPY. 'path' is patched file path. 'oldpath' is set to the
281 origin file when 'op' is either COPY or RENAME, None otherwise. If
281 origin file when 'op' is either COPY or RENAME, None otherwise. If
282 file mode is changed, 'mode' is a tuple (islink, isexec) where
282 file mode is changed, 'mode' is a tuple (islink, isexec) where
283 'islink' is True if the file is a symlink and 'isexec' is True if
283 'islink' is True if the file is a symlink and 'isexec' is True if
284 the file is executable. Otherwise, 'mode' is None.
284 the file is executable. Otherwise, 'mode' is None.
285 """
285 """
286 def __init__(self, path):
286 def __init__(self, path):
287 self.path = path
287 self.path = path
288 self.oldpath = None
288 self.oldpath = None
289 self.mode = None
289 self.mode = None
290 self.op = 'MODIFY'
290 self.op = 'MODIFY'
291 self.binary = False
291 self.binary = False
292
292
293 def setmode(self, mode):
293 def setmode(self, mode):
294 islink = mode & 020000
294 islink = mode & 020000
295 isexec = mode & 0100
295 isexec = mode & 0100
296 self.mode = (islink, isexec)
296 self.mode = (islink, isexec)
297
297
298 def copy(self):
298 def copy(self):
299 other = patchmeta(self.path)
299 other = patchmeta(self.path)
300 other.oldpath = self.oldpath
300 other.oldpath = self.oldpath
301 other.mode = self.mode
301 other.mode = self.mode
302 other.op = self.op
302 other.op = self.op
303 other.binary = self.binary
303 other.binary = self.binary
304 return other
304 return other
305
305
306 def _ispatchinga(self, afile):
306 def _ispatchinga(self, afile):
307 if afile == '/dev/null':
307 if afile == '/dev/null':
308 return self.op == 'ADD'
308 return self.op == 'ADD'
309 return afile == 'a/' + (self.oldpath or self.path)
309 return afile == 'a/' + (self.oldpath or self.path)
310
310
311 def _ispatchingb(self, bfile):
311 def _ispatchingb(self, bfile):
312 if bfile == '/dev/null':
312 if bfile == '/dev/null':
313 return self.op == 'DELETE'
313 return self.op == 'DELETE'
314 return bfile == 'b/' + self.path
314 return bfile == 'b/' + self.path
315
315
316 def ispatching(self, afile, bfile):
316 def ispatching(self, afile, bfile):
317 return self._ispatchinga(afile) and self._ispatchingb(bfile)
317 return self._ispatchinga(afile) and self._ispatchingb(bfile)
318
318
319 def __repr__(self):
319 def __repr__(self):
320 return "<patchmeta %s %r>" % (self.op, self.path)
320 return "<patchmeta %s %r>" % (self.op, self.path)
321
321
322 def readgitpatch(lr):
322 def readgitpatch(lr):
323 """extract git-style metadata about patches from <patchname>"""
323 """extract git-style metadata about patches from <patchname>"""
324
324
325 # Filter patch for git information
325 # Filter patch for git information
326 gp = None
326 gp = None
327 gitpatches = []
327 gitpatches = []
328 for line in lr:
328 for line in lr:
329 line = line.rstrip(' \r\n')
329 line = line.rstrip(' \r\n')
330 if line.startswith('diff --git a/'):
330 if line.startswith('diff --git a/'):
331 m = gitre.match(line)
331 m = gitre.match(line)
332 if m:
332 if m:
333 if gp:
333 if gp:
334 gitpatches.append(gp)
334 gitpatches.append(gp)
335 dst = m.group(2)
335 dst = m.group(2)
336 gp = patchmeta(dst)
336 gp = patchmeta(dst)
337 elif gp:
337 elif gp:
338 if line.startswith('--- '):
338 if line.startswith('--- '):
339 gitpatches.append(gp)
339 gitpatches.append(gp)
340 gp = None
340 gp = None
341 continue
341 continue
342 if line.startswith('rename from '):
342 if line.startswith('rename from '):
343 gp.op = 'RENAME'
343 gp.op = 'RENAME'
344 gp.oldpath = line[12:]
344 gp.oldpath = line[12:]
345 elif line.startswith('rename to '):
345 elif line.startswith('rename to '):
346 gp.path = line[10:]
346 gp.path = line[10:]
347 elif line.startswith('copy from '):
347 elif line.startswith('copy from '):
348 gp.op = 'COPY'
348 gp.op = 'COPY'
349 gp.oldpath = line[10:]
349 gp.oldpath = line[10:]
350 elif line.startswith('copy to '):
350 elif line.startswith('copy to '):
351 gp.path = line[8:]
351 gp.path = line[8:]
352 elif line.startswith('deleted file'):
352 elif line.startswith('deleted file'):
353 gp.op = 'DELETE'
353 gp.op = 'DELETE'
354 elif line.startswith('new file mode '):
354 elif line.startswith('new file mode '):
355 gp.op = 'ADD'
355 gp.op = 'ADD'
356 gp.setmode(int(line[-6:], 8))
356 gp.setmode(int(line[-6:], 8))
357 elif line.startswith('new mode '):
357 elif line.startswith('new mode '):
358 gp.setmode(int(line[-6:], 8))
358 gp.setmode(int(line[-6:], 8))
359 elif line.startswith('GIT binary patch'):
359 elif line.startswith('GIT binary patch'):
360 gp.binary = True
360 gp.binary = True
361 if gp:
361 if gp:
362 gitpatches.append(gp)
362 gitpatches.append(gp)
363
363
364 return gitpatches
364 return gitpatches
365
365
366 class linereader(object):
366 class linereader(object):
367 # simple class to allow pushing lines back into the input stream
367 # simple class to allow pushing lines back into the input stream
368 def __init__(self, fp):
368 def __init__(self, fp):
369 self.fp = fp
369 self.fp = fp
370 self.buf = []
370 self.buf = []
371
371
372 def push(self, line):
372 def push(self, line):
373 if line is not None:
373 if line is not None:
374 self.buf.append(line)
374 self.buf.append(line)
375
375
376 def readline(self):
376 def readline(self):
377 if self.buf:
377 if self.buf:
378 l = self.buf[0]
378 l = self.buf[0]
379 del self.buf[0]
379 del self.buf[0]
380 return l
380 return l
381 return self.fp.readline()
381 return self.fp.readline()
382
382
383 def __iter__(self):
383 def __iter__(self):
384 while True:
384 while True:
385 l = self.readline()
385 l = self.readline()
386 if not l:
386 if not l:
387 break
387 break
388 yield l
388 yield l
389
389
390 class abstractbackend(object):
390 class abstractbackend(object):
391 def __init__(self, ui):
391 def __init__(self, ui):
392 self.ui = ui
392 self.ui = ui
393
393
394 def getfile(self, fname):
394 def getfile(self, fname):
395 """Return target file data and flags as a (data, (islink,
395 """Return target file data and flags as a (data, (islink,
396 isexec)) tuple. Data is None if file is missing/deleted.
396 isexec)) tuple. Data is None if file is missing/deleted.
397 """
397 """
398 raise NotImplementedError
398 raise NotImplementedError
399
399
400 def setfile(self, fname, data, mode, copysource):
400 def setfile(self, fname, data, mode, copysource):
401 """Write data to target file fname and set its mode. mode is a
401 """Write data to target file fname and set its mode. mode is a
402 (islink, isexec) tuple. If data is None, the file content should
402 (islink, isexec) tuple. If data is None, the file content should
403 be left unchanged. If the file is modified after being copied,
403 be left unchanged. If the file is modified after being copied,
404 copysource is set to the original file name.
404 copysource is set to the original file name.
405 """
405 """
406 raise NotImplementedError
406 raise NotImplementedError
407
407
408 def unlink(self, fname):
408 def unlink(self, fname):
409 """Unlink target file."""
409 """Unlink target file."""
410 raise NotImplementedError
410 raise NotImplementedError
411
411
412 def writerej(self, fname, failed, total, lines):
412 def writerej(self, fname, failed, total, lines):
413 """Write rejected lines for fname. total is the number of hunks
413 """Write rejected lines for fname. total is the number of hunks
414 which failed to apply and total the total number of hunks for this
414 which failed to apply and total the total number of hunks for this
415 files.
415 files.
416 """
416 """
417 pass
417 pass
418
418
419 def exists(self, fname):
419 def exists(self, fname):
420 raise NotImplementedError
420 raise NotImplementedError
421
421
422 class fsbackend(abstractbackend):
422 class fsbackend(abstractbackend):
423 def __init__(self, ui, basedir):
423 def __init__(self, ui, basedir):
424 super(fsbackend, self).__init__(ui)
424 super(fsbackend, self).__init__(ui)
425 self.opener = scmutil.opener(basedir)
425 self.opener = scmutil.opener(basedir)
426
426
427 def _join(self, f):
427 def _join(self, f):
428 return os.path.join(self.opener.base, f)
428 return os.path.join(self.opener.base, f)
429
429
430 def getfile(self, fname):
430 def getfile(self, fname):
431 if self.opener.islink(fname):
431 if self.opener.islink(fname):
432 return (self.opener.readlink(fname), (True, False))
432 return (self.opener.readlink(fname), (True, False))
433
433
434 isexec = False
434 isexec = False
435 try:
435 try:
436 isexec = self.opener.lstat(fname).st_mode & 0100 != 0
436 isexec = self.opener.lstat(fname).st_mode & 0100 != 0
437 except OSError, e:
437 except OSError, e:
438 if e.errno != errno.ENOENT:
438 if e.errno != errno.ENOENT:
439 raise
439 raise
440 try:
440 try:
441 return (self.opener.read(fname), (False, isexec))
441 return (self.opener.read(fname), (False, isexec))
442 except IOError, e:
442 except IOError, e:
443 if e.errno != errno.ENOENT:
443 if e.errno != errno.ENOENT:
444 raise
444 raise
445 return None, None
445 return None, None
446
446
447 def setfile(self, fname, data, mode, copysource):
447 def setfile(self, fname, data, mode, copysource):
448 islink, isexec = mode
448 islink, isexec = mode
449 if data is None:
449 if data is None:
450 self.opener.setflags(fname, islink, isexec)
450 self.opener.setflags(fname, islink, isexec)
451 return
451 return
452 if islink:
452 if islink:
453 self.opener.symlink(data, fname)
453 self.opener.symlink(data, fname)
454 else:
454 else:
455 self.opener.write(fname, data)
455 self.opener.write(fname, data)
456 if isexec:
456 if isexec:
457 self.opener.setflags(fname, False, True)
457 self.opener.setflags(fname, False, True)
458
458
459 def unlink(self, fname):
459 def unlink(self, fname):
460 self.opener.unlinkpath(fname, ignoremissing=True)
460 self.opener.unlinkpath(fname, ignoremissing=True)
461
461
462 def writerej(self, fname, failed, total, lines):
462 def writerej(self, fname, failed, total, lines):
463 fname = fname + ".rej"
463 fname = fname + ".rej"
464 self.ui.warn(
464 self.ui.warn(
465 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
465 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
466 (failed, total, fname))
466 (failed, total, fname))
467 fp = self.opener(fname, 'w')
467 fp = self.opener(fname, 'w')
468 fp.writelines(lines)
468 fp.writelines(lines)
469 fp.close()
469 fp.close()
470
470
471 def exists(self, fname):
471 def exists(self, fname):
472 return self.opener.lexists(fname)
472 return self.opener.lexists(fname)
473
473
474 class workingbackend(fsbackend):
474 class workingbackend(fsbackend):
475 def __init__(self, ui, repo, similarity):
475 def __init__(self, ui, repo, similarity):
476 super(workingbackend, self).__init__(ui, repo.root)
476 super(workingbackend, self).__init__(ui, repo.root)
477 self.repo = repo
477 self.repo = repo
478 self.similarity = similarity
478 self.similarity = similarity
479 self.removed = set()
479 self.removed = set()
480 self.changed = set()
480 self.changed = set()
481 self.copied = []
481 self.copied = []
482
482
483 def _checkknown(self, fname):
483 def _checkknown(self, fname):
484 if self.repo.dirstate[fname] == '?' and self.exists(fname):
484 if self.repo.dirstate[fname] == '?' and self.exists(fname):
485 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
485 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
486
486
487 def setfile(self, fname, data, mode, copysource):
487 def setfile(self, fname, data, mode, copysource):
488 self._checkknown(fname)
488 self._checkknown(fname)
489 super(workingbackend, self).setfile(fname, data, mode, copysource)
489 super(workingbackend, self).setfile(fname, data, mode, copysource)
490 if copysource is not None:
490 if copysource is not None:
491 self.copied.append((copysource, fname))
491 self.copied.append((copysource, fname))
492 self.changed.add(fname)
492 self.changed.add(fname)
493
493
494 def unlink(self, fname):
494 def unlink(self, fname):
495 self._checkknown(fname)
495 self._checkknown(fname)
496 super(workingbackend, self).unlink(fname)
496 super(workingbackend, self).unlink(fname)
497 self.removed.add(fname)
497 self.removed.add(fname)
498 self.changed.add(fname)
498 self.changed.add(fname)
499
499
500 def close(self):
500 def close(self):
501 wctx = self.repo[None]
501 wctx = self.repo[None]
502 changed = set(self.changed)
502 changed = set(self.changed)
503 for src, dst in self.copied:
503 for src, dst in self.copied:
504 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
504 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
505 if self.removed:
505 if self.removed:
506 wctx.forget(sorted(self.removed))
506 wctx.forget(sorted(self.removed))
507 for f in self.removed:
507 for f in self.removed:
508 if f not in self.repo.dirstate:
508 if f not in self.repo.dirstate:
509 # File was deleted and no longer belongs to the
509 # File was deleted and no longer belongs to the
510 # dirstate, it was probably marked added then
510 # dirstate, it was probably marked added then
511 # deleted, and should not be considered by
511 # deleted, and should not be considered by
512 # marktouched().
512 # marktouched().
513 changed.discard(f)
513 changed.discard(f)
514 if changed:
514 if changed:
515 scmutil.marktouched(self.repo, changed, self.similarity)
515 scmutil.marktouched(self.repo, changed, self.similarity)
516 return sorted(self.changed)
516 return sorted(self.changed)
517
517
518 class filestore(object):
518 class filestore(object):
519 def __init__(self, maxsize=None):
519 def __init__(self, maxsize=None):
520 self.opener = None
520 self.opener = None
521 self.files = {}
521 self.files = {}
522 self.created = 0
522 self.created = 0
523 self.maxsize = maxsize
523 self.maxsize = maxsize
524 if self.maxsize is None:
524 if self.maxsize is None:
525 self.maxsize = 4*(2**20)
525 self.maxsize = 4*(2**20)
526 self.size = 0
526 self.size = 0
527 self.data = {}
527 self.data = {}
528
528
529 def setfile(self, fname, data, mode, copied=None):
529 def setfile(self, fname, data, mode, copied=None):
530 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
530 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
531 self.data[fname] = (data, mode, copied)
531 self.data[fname] = (data, mode, copied)
532 self.size += len(data)
532 self.size += len(data)
533 else:
533 else:
534 if self.opener is None:
534 if self.opener is None:
535 root = tempfile.mkdtemp(prefix='hg-patch-')
535 root = tempfile.mkdtemp(prefix='hg-patch-')
536 self.opener = scmutil.opener(root)
536 self.opener = scmutil.opener(root)
537 # Avoid filename issues with these simple names
537 # Avoid filename issues with these simple names
538 fn = str(self.created)
538 fn = str(self.created)
539 self.opener.write(fn, data)
539 self.opener.write(fn, data)
540 self.created += 1
540 self.created += 1
541 self.files[fname] = (fn, mode, copied)
541 self.files[fname] = (fn, mode, copied)
542
542
543 def getfile(self, fname):
543 def getfile(self, fname):
544 if fname in self.data:
544 if fname in self.data:
545 return self.data[fname]
545 return self.data[fname]
546 if not self.opener or fname not in self.files:
546 if not self.opener or fname not in self.files:
547 return None, None, None
547 return None, None, None
548 fn, mode, copied = self.files[fname]
548 fn, mode, copied = self.files[fname]
549 return self.opener.read(fn), mode, copied
549 return self.opener.read(fn), mode, copied
550
550
551 def close(self):
551 def close(self):
552 if self.opener:
552 if self.opener:
553 shutil.rmtree(self.opener.base)
553 shutil.rmtree(self.opener.base)
554
554
555 class repobackend(abstractbackend):
555 class repobackend(abstractbackend):
556 def __init__(self, ui, repo, ctx, store):
556 def __init__(self, ui, repo, ctx, store):
557 super(repobackend, self).__init__(ui)
557 super(repobackend, self).__init__(ui)
558 self.repo = repo
558 self.repo = repo
559 self.ctx = ctx
559 self.ctx = ctx
560 self.store = store
560 self.store = store
561 self.changed = set()
561 self.changed = set()
562 self.removed = set()
562 self.removed = set()
563 self.copied = {}
563 self.copied = {}
564
564
565 def _checkknown(self, fname):
565 def _checkknown(self, fname):
566 if fname not in self.ctx:
566 if fname not in self.ctx:
567 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
567 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
568
568
569 def getfile(self, fname):
569 def getfile(self, fname):
570 try:
570 try:
571 fctx = self.ctx[fname]
571 fctx = self.ctx[fname]
572 except error.LookupError:
572 except error.LookupError:
573 return None, None
573 return None, None
574 flags = fctx.flags()
574 flags = fctx.flags()
575 return fctx.data(), ('l' in flags, 'x' in flags)
575 return fctx.data(), ('l' in flags, 'x' in flags)
576
576
577 def setfile(self, fname, data, mode, copysource):
577 def setfile(self, fname, data, mode, copysource):
578 if copysource:
578 if copysource:
579 self._checkknown(copysource)
579 self._checkknown(copysource)
580 if data is None:
580 if data is None:
581 data = self.ctx[fname].data()
581 data = self.ctx[fname].data()
582 self.store.setfile(fname, data, mode, copysource)
582 self.store.setfile(fname, data, mode, copysource)
583 self.changed.add(fname)
583 self.changed.add(fname)
584 if copysource:
584 if copysource:
585 self.copied[fname] = copysource
585 self.copied[fname] = copysource
586
586
587 def unlink(self, fname):
587 def unlink(self, fname):
588 self._checkknown(fname)
588 self._checkknown(fname)
589 self.removed.add(fname)
589 self.removed.add(fname)
590
590
591 def exists(self, fname):
591 def exists(self, fname):
592 return fname in self.ctx
592 return fname in self.ctx
593
593
594 def close(self):
594 def close(self):
595 return self.changed | self.removed
595 return self.changed | self.removed
596
596
597 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
597 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
598 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
598 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
599 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
599 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
600 eolmodes = ['strict', 'crlf', 'lf', 'auto']
600 eolmodes = ['strict', 'crlf', 'lf', 'auto']
601
601
602 class patchfile(object):
602 class patchfile(object):
603 def __init__(self, ui, gp, backend, store, eolmode='strict'):
603 def __init__(self, ui, gp, backend, store, eolmode='strict'):
604 self.fname = gp.path
604 self.fname = gp.path
605 self.eolmode = eolmode
605 self.eolmode = eolmode
606 self.eol = None
606 self.eol = None
607 self.backend = backend
607 self.backend = backend
608 self.ui = ui
608 self.ui = ui
609 self.lines = []
609 self.lines = []
610 self.exists = False
610 self.exists = False
611 self.missing = True
611 self.missing = True
612 self.mode = gp.mode
612 self.mode = gp.mode
613 self.copysource = gp.oldpath
613 self.copysource = gp.oldpath
614 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
614 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
615 self.remove = gp.op == 'DELETE'
615 self.remove = gp.op == 'DELETE'
616 if self.copysource is None:
616 if self.copysource is None:
617 data, mode = backend.getfile(self.fname)
617 data, mode = backend.getfile(self.fname)
618 else:
618 else:
619 data, mode = store.getfile(self.copysource)[:2]
619 data, mode = store.getfile(self.copysource)[:2]
620 if data is not None:
620 if data is not None:
621 self.exists = self.copysource is None or backend.exists(self.fname)
621 self.exists = self.copysource is None or backend.exists(self.fname)
622 self.missing = False
622 self.missing = False
623 if data:
623 if data:
624 self.lines = mdiff.splitnewlines(data)
624 self.lines = mdiff.splitnewlines(data)
625 if self.mode is None:
625 if self.mode is None:
626 self.mode = mode
626 self.mode = mode
627 if self.lines:
627 if self.lines:
628 # Normalize line endings
628 # Normalize line endings
629 if self.lines[0].endswith('\r\n'):
629 if self.lines[0].endswith('\r\n'):
630 self.eol = '\r\n'
630 self.eol = '\r\n'
631 elif self.lines[0].endswith('\n'):
631 elif self.lines[0].endswith('\n'):
632 self.eol = '\n'
632 self.eol = '\n'
633 if eolmode != 'strict':
633 if eolmode != 'strict':
634 nlines = []
634 nlines = []
635 for l in self.lines:
635 for l in self.lines:
636 if l.endswith('\r\n'):
636 if l.endswith('\r\n'):
637 l = l[:-2] + '\n'
637 l = l[:-2] + '\n'
638 nlines.append(l)
638 nlines.append(l)
639 self.lines = nlines
639 self.lines = nlines
640 else:
640 else:
641 if self.create:
641 if self.create:
642 self.missing = False
642 self.missing = False
643 if self.mode is None:
643 if self.mode is None:
644 self.mode = (False, False)
644 self.mode = (False, False)
645 if self.missing:
645 if self.missing:
646 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
646 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
647
647
648 self.hash = {}
648 self.hash = {}
649 self.dirty = 0
649 self.dirty = 0
650 self.offset = 0
650 self.offset = 0
651 self.skew = 0
651 self.skew = 0
652 self.rej = []
652 self.rej = []
653 self.fileprinted = False
653 self.fileprinted = False
654 self.printfile(False)
654 self.printfile(False)
655 self.hunks = 0
655 self.hunks = 0
656
656
657 def writelines(self, fname, lines, mode):
657 def writelines(self, fname, lines, mode):
658 if self.eolmode == 'auto':
658 if self.eolmode == 'auto':
659 eol = self.eol
659 eol = self.eol
660 elif self.eolmode == 'crlf':
660 elif self.eolmode == 'crlf':
661 eol = '\r\n'
661 eol = '\r\n'
662 else:
662 else:
663 eol = '\n'
663 eol = '\n'
664
664
665 if self.eolmode != 'strict' and eol and eol != '\n':
665 if self.eolmode != 'strict' and eol and eol != '\n':
666 rawlines = []
666 rawlines = []
667 for l in lines:
667 for l in lines:
668 if l and l[-1] == '\n':
668 if l and l[-1] == '\n':
669 l = l[:-1] + eol
669 l = l[:-1] + eol
670 rawlines.append(l)
670 rawlines.append(l)
671 lines = rawlines
671 lines = rawlines
672
672
673 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
673 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
674
674
675 def printfile(self, warn):
675 def printfile(self, warn):
676 if self.fileprinted:
676 if self.fileprinted:
677 return
677 return
678 if warn or self.ui.verbose:
678 if warn or self.ui.verbose:
679 self.fileprinted = True
679 self.fileprinted = True
680 s = _("patching file %s\n") % self.fname
680 s = _("patching file %s\n") % self.fname
681 if warn:
681 if warn:
682 self.ui.warn(s)
682 self.ui.warn(s)
683 else:
683 else:
684 self.ui.note(s)
684 self.ui.note(s)
685
685
686
686
687 def findlines(self, l, linenum):
687 def findlines(self, l, linenum):
688 # looks through the hash and finds candidate lines. The
688 # looks through the hash and finds candidate lines. The
689 # result is a list of line numbers sorted based on distance
689 # result is a list of line numbers sorted based on distance
690 # from linenum
690 # from linenum
691
691
692 cand = self.hash.get(l, [])
692 cand = self.hash.get(l, [])
693 if len(cand) > 1:
693 if len(cand) > 1:
694 # resort our list of potentials forward then back.
694 # resort our list of potentials forward then back.
695 cand.sort(key=lambda x: abs(x - linenum))
695 cand.sort(key=lambda x: abs(x - linenum))
696 return cand
696 return cand
697
697
698 def write_rej(self):
698 def write_rej(self):
699 # our rejects are a little different from patch(1). This always
699 # our rejects are a little different from patch(1). This always
700 # creates rejects in the same form as the original patch. A file
700 # creates rejects in the same form as the original patch. A file
701 # header is inserted so that you can run the reject through patch again
701 # header is inserted so that you can run the reject through patch again
702 # without having to type the filename.
702 # without having to type the filename.
703 if not self.rej:
703 if not self.rej:
704 return
704 return
705 base = os.path.basename(self.fname)
705 base = os.path.basename(self.fname)
706 lines = ["--- %s\n+++ %s\n" % (base, base)]
706 lines = ["--- %s\n+++ %s\n" % (base, base)]
707 for x in self.rej:
707 for x in self.rej:
708 for l in x.hunk:
708 for l in x.hunk:
709 lines.append(l)
709 lines.append(l)
710 if l[-1] != '\n':
710 if l[-1] != '\n':
711 lines.append("\n\ No newline at end of file\n")
711 lines.append("\n\ No newline at end of file\n")
712 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
712 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
713
713
714 def apply(self, h):
714 def apply(self, h):
715 if not h.complete():
715 if not h.complete():
716 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
716 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
717 (h.number, h.desc, len(h.a), h.lena, len(h.b),
717 (h.number, h.desc, len(h.a), h.lena, len(h.b),
718 h.lenb))
718 h.lenb))
719
719
720 self.hunks += 1
720 self.hunks += 1
721
721
722 if self.missing:
722 if self.missing:
723 self.rej.append(h)
723 self.rej.append(h)
724 return -1
724 return -1
725
725
726 if self.exists and self.create:
726 if self.exists and self.create:
727 if self.copysource:
727 if self.copysource:
728 self.ui.warn(_("cannot create %s: destination already "
728 self.ui.warn(_("cannot create %s: destination already "
729 "exists\n") % self.fname)
729 "exists\n") % self.fname)
730 else:
730 else:
731 self.ui.warn(_("file %s already exists\n") % self.fname)
731 self.ui.warn(_("file %s already exists\n") % self.fname)
732 self.rej.append(h)
732 self.rej.append(h)
733 return -1
733 return -1
734
734
735 if isinstance(h, binhunk):
735 if isinstance(h, binhunk):
736 if self.remove:
736 if self.remove:
737 self.backend.unlink(self.fname)
737 self.backend.unlink(self.fname)
738 else:
738 else:
739 l = h.new(self.lines)
739 l = h.new(self.lines)
740 self.lines[:] = l
740 self.lines[:] = l
741 self.offset += len(l)
741 self.offset += len(l)
742 self.dirty = True
742 self.dirty = True
743 return 0
743 return 0
744
744
745 horig = h
745 horig = h
746 if (self.eolmode in ('crlf', 'lf')
746 if (self.eolmode in ('crlf', 'lf')
747 or self.eolmode == 'auto' and self.eol):
747 or self.eolmode == 'auto' and self.eol):
748 # If new eols are going to be normalized, then normalize
748 # If new eols are going to be normalized, then normalize
749 # hunk data before patching. Otherwise, preserve input
749 # hunk data before patching. Otherwise, preserve input
750 # line-endings.
750 # line-endings.
751 h = h.getnormalized()
751 h = h.getnormalized()
752
752
753 # fast case first, no offsets, no fuzz
753 # fast case first, no offsets, no fuzz
754 old, oldstart, new, newstart = h.fuzzit(0, False)
754 old, oldstart, new, newstart = h.fuzzit(0, False)
755 oldstart += self.offset
755 oldstart += self.offset
756 orig_start = oldstart
756 orig_start = oldstart
757 # if there's skew we want to emit the "(offset %d lines)" even
757 # if there's skew we want to emit the "(offset %d lines)" even
758 # when the hunk cleanly applies at start + skew, so skip the
758 # when the hunk cleanly applies at start + skew, so skip the
759 # fast case code
759 # fast case code
760 if (self.skew == 0 and
760 if (self.skew == 0 and
761 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
761 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
762 if self.remove:
762 if self.remove:
763 self.backend.unlink(self.fname)
763 self.backend.unlink(self.fname)
764 else:
764 else:
765 self.lines[oldstart:oldstart + len(old)] = new
765 self.lines[oldstart:oldstart + len(old)] = new
766 self.offset += len(new) - len(old)
766 self.offset += len(new) - len(old)
767 self.dirty = True
767 self.dirty = True
768 return 0
768 return 0
769
769
770 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
770 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
771 self.hash = {}
771 self.hash = {}
772 for x, s in enumerate(self.lines):
772 for x, s in enumerate(self.lines):
773 self.hash.setdefault(s, []).append(x)
773 self.hash.setdefault(s, []).append(x)
774
774
775 for fuzzlen in xrange(3):
775 for fuzzlen in xrange(3):
776 for toponly in [True, False]:
776 for toponly in [True, False]:
777 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
777 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
778 oldstart = oldstart + self.offset + self.skew
778 oldstart = oldstart + self.offset + self.skew
779 oldstart = min(oldstart, len(self.lines))
779 oldstart = min(oldstart, len(self.lines))
780 if old:
780 if old:
781 cand = self.findlines(old[0][1:], oldstart)
781 cand = self.findlines(old[0][1:], oldstart)
782 else:
782 else:
783 # Only adding lines with no or fuzzed context, just
783 # Only adding lines with no or fuzzed context, just
784 # take the skew in account
784 # take the skew in account
785 cand = [oldstart]
785 cand = [oldstart]
786
786
787 for l in cand:
787 for l in cand:
788 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
788 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
789 self.lines[l : l + len(old)] = new
789 self.lines[l : l + len(old)] = new
790 self.offset += len(new) - len(old)
790 self.offset += len(new) - len(old)
791 self.skew = l - orig_start
791 self.skew = l - orig_start
792 self.dirty = True
792 self.dirty = True
793 offset = l - orig_start - fuzzlen
793 offset = l - orig_start - fuzzlen
794 if fuzzlen:
794 if fuzzlen:
795 msg = _("Hunk #%d succeeded at %d "
795 msg = _("Hunk #%d succeeded at %d "
796 "with fuzz %d "
796 "with fuzz %d "
797 "(offset %d lines).\n")
797 "(offset %d lines).\n")
798 self.printfile(True)
798 self.printfile(True)
799 self.ui.warn(msg %
799 self.ui.warn(msg %
800 (h.number, l + 1, fuzzlen, offset))
800 (h.number, l + 1, fuzzlen, offset))
801 else:
801 else:
802 msg = _("Hunk #%d succeeded at %d "
802 msg = _("Hunk #%d succeeded at %d "
803 "(offset %d lines).\n")
803 "(offset %d lines).\n")
804 self.ui.note(msg % (h.number, l + 1, offset))
804 self.ui.note(msg % (h.number, l + 1, offset))
805 return fuzzlen
805 return fuzzlen
806 self.printfile(True)
806 self.printfile(True)
807 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
807 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
808 self.rej.append(horig)
808 self.rej.append(horig)
809 return -1
809 return -1
810
810
811 def close(self):
811 def close(self):
812 if self.dirty:
812 if self.dirty:
813 self.writelines(self.fname, self.lines, self.mode)
813 self.writelines(self.fname, self.lines, self.mode)
814 self.write_rej()
814 self.write_rej()
815 return len(self.rej)
815 return len(self.rej)
816
816
817 class header(object):
817 class header(object):
818 """patch header
818 """patch header
819 """
819 """
820 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
820 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
821 diff_re = re.compile('diff -r .* (.*)$')
821 diff_re = re.compile('diff -r .* (.*)$')
822 allhunks_re = re.compile('(?:index|deleted file) ')
822 allhunks_re = re.compile('(?:index|deleted file) ')
823 pretty_re = re.compile('(?:new file|deleted file) ')
823 pretty_re = re.compile('(?:new file|deleted file) ')
824 special_re = re.compile('(?:index|new|deleted|copy|rename) ')
824 special_re = re.compile('(?:index|new|deleted|copy|rename) ')
825
825
826 def __init__(self, header):
826 def __init__(self, header):
827 self.header = header
827 self.header = header
828 self.hunks = []
828 self.hunks = []
829
829
830 def binary(self):
830 def binary(self):
831 return util.any(h.startswith('index ') for h in self.header)
831 return util.any(h.startswith('index ') for h in self.header)
832
832
833 def pretty(self, fp):
833 def pretty(self, fp):
834 for h in self.header:
834 for h in self.header:
835 if h.startswith('index '):
835 if h.startswith('index '):
836 fp.write(_('this modifies a binary file (all or nothing)\n'))
836 fp.write(_('this modifies a binary file (all or nothing)\n'))
837 break
837 break
838 if self.pretty_re.match(h):
838 if self.pretty_re.match(h):
839 fp.write(h)
839 fp.write(h)
840 if self.binary():
840 if self.binary():
841 fp.write(_('this is a binary file\n'))
841 fp.write(_('this is a binary file\n'))
842 break
842 break
843 if h.startswith('---'):
843 if h.startswith('---'):
844 fp.write(_('%d hunks, %d lines changed\n') %
844 fp.write(_('%d hunks, %d lines changed\n') %
845 (len(self.hunks),
845 (len(self.hunks),
846 sum([max(h.added, h.removed) for h in self.hunks])))
846 sum([max(h.added, h.removed) for h in self.hunks])))
847 break
847 break
848 fp.write(h)
848 fp.write(h)
849
849
850 def write(self, fp):
850 def write(self, fp):
851 fp.write(''.join(self.header))
851 fp.write(''.join(self.header))
852
852
853 def allhunks(self):
853 def allhunks(self):
854 return util.any(self.allhunks_re.match(h) for h in self.header)
854 return util.any(self.allhunks_re.match(h) for h in self.header)
855
855
856 def files(self):
856 def files(self):
857 match = self.diffgit_re.match(self.header[0])
857 match = self.diffgit_re.match(self.header[0])
858 if match:
858 if match:
859 fromfile, tofile = match.groups()
859 fromfile, tofile = match.groups()
860 if fromfile == tofile:
860 if fromfile == tofile:
861 return [fromfile]
861 return [fromfile]
862 return [fromfile, tofile]
862 return [fromfile, tofile]
863 else:
863 else:
864 return self.diff_re.match(self.header[0]).groups()
864 return self.diff_re.match(self.header[0]).groups()
865
865
866 def filename(self):
866 def filename(self):
867 return self.files()[-1]
867 return self.files()[-1]
868
868
869 def __repr__(self):
869 def __repr__(self):
870 return '<header %s>' % (' '.join(map(repr, self.files())))
870 return '<header %s>' % (' '.join(map(repr, self.files())))
871
871
872 def special(self):
872 def special(self):
873 return util.any(self.special_re.match(h) for h in self.header)
873 return util.any(self.special_re.match(h) for h in self.header)
874
874
875 class recordhunk(object):
875 class recordhunk(object):
876 """patch hunk
876 """patch hunk
877
877
878 XXX shouldn't we merge this with the other hunk class?
878 XXX shouldn't we merge this with the other hunk class?
879 """
879 """
880 maxcontext = 3
880 maxcontext = 3
881
881
882 def __init__(self, header, fromline, toline, proc, before, hunk, after):
882 def __init__(self, header, fromline, toline, proc, before, hunk, after):
883 def trimcontext(number, lines):
883 def trimcontext(number, lines):
884 delta = len(lines) - self.maxcontext
884 delta = len(lines) - self.maxcontext
885 if False and delta > 0:
885 if False and delta > 0:
886 return number + delta, lines[:self.maxcontext]
886 return number + delta, lines[:self.maxcontext]
887 return number, lines
887 return number, lines
888
888
889 self.header = header
889 self.header = header
890 self.fromline, self.before = trimcontext(fromline, before)
890 self.fromline, self.before = trimcontext(fromline, before)
891 self.toline, self.after = trimcontext(toline, after)
891 self.toline, self.after = trimcontext(toline, after)
892 self.proc = proc
892 self.proc = proc
893 self.hunk = hunk
893 self.hunk = hunk
894 self.added, self.removed = self.countchanges(self.hunk)
894 self.added, self.removed = self.countchanges(self.hunk)
895
895
896 def __eq__(self, v):
897 if not isinstance(v, recordhunk):
898 return False
899
900 return ((v.hunk == self.hunk) and
901 (v.proc == self.proc) and
902 (self.fromline == v.fromline) and
903 (self.header.files() == v.header.files()))
904
905 def __hash__(self):
906 return hash((tuple(self.hunk),
907 tuple(self.header.files()),
908 self.fromline,
909 self.proc))
910
896 def countchanges(self, hunk):
911 def countchanges(self, hunk):
897 """hunk -> (n+,n-)"""
912 """hunk -> (n+,n-)"""
898 add = len([h for h in hunk if h[0] == '+'])
913 add = len([h for h in hunk if h[0] == '+'])
899 rem = len([h for h in hunk if h[0] == '-'])
914 rem = len([h for h in hunk if h[0] == '-'])
900 return add, rem
915 return add, rem
901
916
902 def write(self, fp):
917 def write(self, fp):
903 delta = len(self.before) + len(self.after)
918 delta = len(self.before) + len(self.after)
904 if self.after and self.after[-1] == '\\ No newline at end of file\n':
919 if self.after and self.after[-1] == '\\ No newline at end of file\n':
905 delta -= 1
920 delta -= 1
906 fromlen = delta + self.removed
921 fromlen = delta + self.removed
907 tolen = delta + self.added
922 tolen = delta + self.added
908 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
923 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
909 (self.fromline, fromlen, self.toline, tolen,
924 (self.fromline, fromlen, self.toline, tolen,
910 self.proc and (' ' + self.proc)))
925 self.proc and (' ' + self.proc)))
911 fp.write(''.join(self.before + self.hunk + self.after))
926 fp.write(''.join(self.before + self.hunk + self.after))
912
927
913 pretty = write
928 pretty = write
914
929
915 def filename(self):
930 def filename(self):
916 return self.header.filename()
931 return self.header.filename()
917
932
918 def __repr__(self):
933 def __repr__(self):
919 return '<hunk %r@%d>' % (self.filename(), self.fromline)
934 return '<hunk %r@%d>' % (self.filename(), self.fromline)
920
935
921 def filterpatch(ui, headers):
936 def filterpatch(ui, headers):
922 """Interactively filter patch chunks into applied-only chunks"""
937 """Interactively filter patch chunks into applied-only chunks"""
923
938
924 def prompt(skipfile, skipall, query, chunk):
939 def prompt(skipfile, skipall, query, chunk):
925 """prompt query, and process base inputs
940 """prompt query, and process base inputs
926
941
927 - y/n for the rest of file
942 - y/n for the rest of file
928 - y/n for the rest
943 - y/n for the rest
929 - ? (help)
944 - ? (help)
930 - q (quit)
945 - q (quit)
931
946
932 Return True/False and possibly updated skipfile and skipall.
947 Return True/False and possibly updated skipfile and skipall.
933 """
948 """
934 newpatches = None
949 newpatches = None
935 if skipall is not None:
950 if skipall is not None:
936 return skipall, skipfile, skipall, newpatches
951 return skipall, skipfile, skipall, newpatches
937 if skipfile is not None:
952 if skipfile is not None:
938 return skipfile, skipfile, skipall, newpatches
953 return skipfile, skipfile, skipall, newpatches
939 while True:
954 while True:
940 resps = _('[Ynesfdaq?]'
955 resps = _('[Ynesfdaq?]'
941 '$$ &Yes, record this change'
956 '$$ &Yes, record this change'
942 '$$ &No, skip this change'
957 '$$ &No, skip this change'
943 '$$ &Edit this change manually'
958 '$$ &Edit this change manually'
944 '$$ &Skip remaining changes to this file'
959 '$$ &Skip remaining changes to this file'
945 '$$ Record remaining changes to this &file'
960 '$$ Record remaining changes to this &file'
946 '$$ &Done, skip remaining changes and files'
961 '$$ &Done, skip remaining changes and files'
947 '$$ Record &all changes to all remaining files'
962 '$$ Record &all changes to all remaining files'
948 '$$ &Quit, recording no changes'
963 '$$ &Quit, recording no changes'
949 '$$ &? (display help)')
964 '$$ &? (display help)')
950 r = ui.promptchoice("%s %s" % (query, resps))
965 r = ui.promptchoice("%s %s" % (query, resps))
951 ui.write("\n")
966 ui.write("\n")
952 if r == 8: # ?
967 if r == 8: # ?
953 for c, t in ui.extractchoices(resps)[1]:
968 for c, t in ui.extractchoices(resps)[1]:
954 ui.write('%s - %s\n' % (c, t.lower()))
969 ui.write('%s - %s\n' % (c, t.lower()))
955 continue
970 continue
956 elif r == 0: # yes
971 elif r == 0: # yes
957 ret = True
972 ret = True
958 elif r == 1: # no
973 elif r == 1: # no
959 ret = False
974 ret = False
960 elif r == 2: # Edit patch
975 elif r == 2: # Edit patch
961 if chunk is None:
976 if chunk is None:
962 ui.write(_('cannot edit patch for whole file'))
977 ui.write(_('cannot edit patch for whole file'))
963 ui.write("\n")
978 ui.write("\n")
964 continue
979 continue
965 if chunk.header.binary():
980 if chunk.header.binary():
966 ui.write(_('cannot edit patch for binary file'))
981 ui.write(_('cannot edit patch for binary file'))
967 ui.write("\n")
982 ui.write("\n")
968 continue
983 continue
969 # Patch comment based on the Git one (based on comment at end of
984 # Patch comment based on the Git one (based on comment at end of
970 # http://mercurial.selenic.com/wiki/RecordExtension)
985 # http://mercurial.selenic.com/wiki/RecordExtension)
971 phelp = '---' + _("""
986 phelp = '---' + _("""
972 To remove '-' lines, make them ' ' lines (context).
987 To remove '-' lines, make them ' ' lines (context).
973 To remove '+' lines, delete them.
988 To remove '+' lines, delete them.
974 Lines starting with # will be removed from the patch.
989 Lines starting with # will be removed from the patch.
975
990
976 If the patch applies cleanly, the edited hunk will immediately be
991 If the patch applies cleanly, the edited hunk will immediately be
977 added to the record list. If it does not apply cleanly, a rejects
992 added to the record list. If it does not apply cleanly, a rejects
978 file will be generated: you can use that when you try again. If
993 file will be generated: you can use that when you try again. If
979 all lines of the hunk are removed, then the edit is aborted and
994 all lines of the hunk are removed, then the edit is aborted and
980 the hunk is left unchanged.
995 the hunk is left unchanged.
981 """)
996 """)
982 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
997 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
983 suffix=".diff", text=True)
998 suffix=".diff", text=True)
984 ncpatchfp = None
999 ncpatchfp = None
985 try:
1000 try:
986 # Write the initial patch
1001 # Write the initial patch
987 f = os.fdopen(patchfd, "w")
1002 f = os.fdopen(patchfd, "w")
988 chunk.header.write(f)
1003 chunk.header.write(f)
989 chunk.write(f)
1004 chunk.write(f)
990 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1005 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
991 f.close()
1006 f.close()
992 # Start the editor and wait for it to complete
1007 # Start the editor and wait for it to complete
993 editor = ui.geteditor()
1008 editor = ui.geteditor()
994 ui.system("%s \"%s\"" % (editor, patchfn),
1009 ui.system("%s \"%s\"" % (editor, patchfn),
995 environ={'HGUSER': ui.username()},
1010 environ={'HGUSER': ui.username()},
996 onerr=util.Abort, errprefix=_("edit failed"))
1011 onerr=util.Abort, errprefix=_("edit failed"))
997 # Remove comment lines
1012 # Remove comment lines
998 patchfp = open(patchfn)
1013 patchfp = open(patchfn)
999 ncpatchfp = cStringIO.StringIO()
1014 ncpatchfp = cStringIO.StringIO()
1000 for line in patchfp:
1015 for line in patchfp:
1001 if not line.startswith('#'):
1016 if not line.startswith('#'):
1002 ncpatchfp.write(line)
1017 ncpatchfp.write(line)
1003 patchfp.close()
1018 patchfp.close()
1004 ncpatchfp.seek(0)
1019 ncpatchfp.seek(0)
1005 newpatches = parsepatch(ncpatchfp)
1020 newpatches = parsepatch(ncpatchfp)
1006 finally:
1021 finally:
1007 os.unlink(patchfn)
1022 os.unlink(patchfn)
1008 del ncpatchfp
1023 del ncpatchfp
1009 # Signal that the chunk shouldn't be applied as-is, but
1024 # Signal that the chunk shouldn't be applied as-is, but
1010 # provide the new patch to be used instead.
1025 # provide the new patch to be used instead.
1011 ret = False
1026 ret = False
1012 elif r == 3: # Skip
1027 elif r == 3: # Skip
1013 ret = skipfile = False
1028 ret = skipfile = False
1014 elif r == 4: # file (Record remaining)
1029 elif r == 4: # file (Record remaining)
1015 ret = skipfile = True
1030 ret = skipfile = True
1016 elif r == 5: # done, skip remaining
1031 elif r == 5: # done, skip remaining
1017 ret = skipall = False
1032 ret = skipall = False
1018 elif r == 6: # all
1033 elif r == 6: # all
1019 ret = skipall = True
1034 ret = skipall = True
1020 elif r == 7: # quit
1035 elif r == 7: # quit
1021 raise util.Abort(_('user quit'))
1036 raise util.Abort(_('user quit'))
1022 return ret, skipfile, skipall, newpatches
1037 return ret, skipfile, skipall, newpatches
1023
1038
1024 seen = set()
1039 seen = set()
1025 applied = {} # 'filename' -> [] of chunks
1040 applied = {} # 'filename' -> [] of chunks
1026 skipfile, skipall = None, None
1041 skipfile, skipall = None, None
1027 pos, total = 1, sum(len(h.hunks) for h in headers)
1042 pos, total = 1, sum(len(h.hunks) for h in headers)
1028 for h in headers:
1043 for h in headers:
1029 pos += len(h.hunks)
1044 pos += len(h.hunks)
1030 skipfile = None
1045 skipfile = None
1031 fixoffset = 0
1046 fixoffset = 0
1032 hdr = ''.join(h.header)
1047 hdr = ''.join(h.header)
1033 if hdr in seen:
1048 if hdr in seen:
1034 continue
1049 continue
1035 seen.add(hdr)
1050 seen.add(hdr)
1036 if skipall is None:
1051 if skipall is None:
1037 h.pretty(ui)
1052 h.pretty(ui)
1038 msg = (_('examine changes to %s?') %
1053 msg = (_('examine changes to %s?') %
1039 _(' and ').join("'%s'" % f for f in h.files()))
1054 _(' and ').join("'%s'" % f for f in h.files()))
1040 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1055 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1041 if not r:
1056 if not r:
1042 continue
1057 continue
1043 applied[h.filename()] = [h]
1058 applied[h.filename()] = [h]
1044 if h.allhunks():
1059 if h.allhunks():
1045 applied[h.filename()] += h.hunks
1060 applied[h.filename()] += h.hunks
1046 continue
1061 continue
1047 for i, chunk in enumerate(h.hunks):
1062 for i, chunk in enumerate(h.hunks):
1048 if skipfile is None and skipall is None:
1063 if skipfile is None and skipall is None:
1049 chunk.pretty(ui)
1064 chunk.pretty(ui)
1050 if total == 1:
1065 if total == 1:
1051 msg = _("record this change to '%s'?") % chunk.filename()
1066 msg = _("record this change to '%s'?") % chunk.filename()
1052 else:
1067 else:
1053 idx = pos - len(h.hunks) + i
1068 idx = pos - len(h.hunks) + i
1054 msg = _("record change %d/%d to '%s'?") % (idx, total,
1069 msg = _("record change %d/%d to '%s'?") % (idx, total,
1055 chunk.filename())
1070 chunk.filename())
1056 r, skipfile, skipall, newpatches = prompt(skipfile,
1071 r, skipfile, skipall, newpatches = prompt(skipfile,
1057 skipall, msg, chunk)
1072 skipall, msg, chunk)
1058 if r:
1073 if r:
1059 if fixoffset:
1074 if fixoffset:
1060 chunk = copy.copy(chunk)
1075 chunk = copy.copy(chunk)
1061 chunk.toline += fixoffset
1076 chunk.toline += fixoffset
1062 applied[chunk.filename()].append(chunk)
1077 applied[chunk.filename()].append(chunk)
1063 elif newpatches is not None:
1078 elif newpatches is not None:
1064 for newpatch in newpatches:
1079 for newpatch in newpatches:
1065 for newhunk in newpatch.hunks:
1080 for newhunk in newpatch.hunks:
1066 if fixoffset:
1081 if fixoffset:
1067 newhunk.toline += fixoffset
1082 newhunk.toline += fixoffset
1068 applied[newhunk.filename()].append(newhunk)
1083 applied[newhunk.filename()].append(newhunk)
1069 else:
1084 else:
1070 fixoffset += chunk.removed - chunk.added
1085 fixoffset += chunk.removed - chunk.added
1071 return sum([h for h in applied.itervalues()
1086 return sum([h for h in applied.itervalues()
1072 if h[0].special() or len(h) > 1], [])
1087 if h[0].special() or len(h) > 1], [])
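
# Illustrative sketch (editor addition, not upstream code): the function
# ending above returns a flat list that interleaves each kept header with
# the hunks selected for it, e.g. [header_a, hunk_a1, header_b, hunk_b1].
# Assuming the 'header' class defined earlier in this file, a caller could
# regroup that list per file roughly like this:
def _example_group_filtered(chosen):
    grouped = {}
    current = None
    for item in chosen:
        if isinstance(item, header):
            # a header always precedes its hunks in the flat list
            current = grouped.setdefault(item.filename(), [])
        current.append(item)
    return grouped
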
1073 class hunk(object):
1088 class hunk(object):
1074 def __init__(self, desc, num, lr, context):
1089 def __init__(self, desc, num, lr, context):
1075 self.number = num
1090 self.number = num
1076 self.desc = desc
1091 self.desc = desc
1077 self.hunk = [desc]
1092 self.hunk = [desc]
1078 self.a = []
1093 self.a = []
1079 self.b = []
1094 self.b = []
1080 self.starta = self.lena = None
1095 self.starta = self.lena = None
1081 self.startb = self.lenb = None
1096 self.startb = self.lenb = None
1082 if lr is not None:
1097 if lr is not None:
1083 if context:
1098 if context:
1084 self.read_context_hunk(lr)
1099 self.read_context_hunk(lr)
1085 else:
1100 else:
1086 self.read_unified_hunk(lr)
1101 self.read_unified_hunk(lr)
1087
1102
1088 def getnormalized(self):
1103 def getnormalized(self):
1089 """Return a copy with line endings normalized to LF."""
1104 """Return a copy with line endings normalized to LF."""
1090
1105
1091 def normalize(lines):
1106 def normalize(lines):
1092 nlines = []
1107 nlines = []
1093 for line in lines:
1108 for line in lines:
1094 if line.endswith('\r\n'):
1109 if line.endswith('\r\n'):
1095 line = line[:-2] + '\n'
1110 line = line[:-2] + '\n'
1096 nlines.append(line)
1111 nlines.append(line)
1097 return nlines
1112 return nlines
1098
1113
1099 # Dummy object, it is rebuilt manually
1114 # Dummy object, it is rebuilt manually
1100 nh = hunk(self.desc, self.number, None, None)
1115 nh = hunk(self.desc, self.number, None, None)
1101 nh.number = self.number
1116 nh.number = self.number
1102 nh.desc = self.desc
1117 nh.desc = self.desc
1103 nh.hunk = self.hunk
1118 nh.hunk = self.hunk
1104 nh.a = normalize(self.a)
1119 nh.a = normalize(self.a)
1105 nh.b = normalize(self.b)
1120 nh.b = normalize(self.b)
1106 nh.starta = self.starta
1121 nh.starta = self.starta
1107 nh.startb = self.startb
1122 nh.startb = self.startb
1108 nh.lena = self.lena
1123 nh.lena = self.lena
1109 nh.lenb = self.lenb
1124 nh.lenb = self.lenb
1110 return nh
1125 return nh
1111
1126
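# Illustrative sketch (editor addition, not upstream code): given a parsed
# hunk 'h' (name is illustrative), getnormalized() above yields an LF-only
# copy and leaves 'h' itself untouched:
def _example_getnormalized(h):
    nh = h.getnormalized()
    # every '\r\n' ending in h.a / h.b appears as '\n' in nh.a / nh.b
    return nh
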
1112 def read_unified_hunk(self, lr):
1127 def read_unified_hunk(self, lr):
1113 m = unidesc.match(self.desc)
1128 m = unidesc.match(self.desc)
1114 if not m:
1129 if not m:
1115 raise PatchError(_("bad hunk #%d") % self.number)
1130 raise PatchError(_("bad hunk #%d") % self.number)
1116 self.starta, self.lena, self.startb, self.lenb = m.groups()
1131 self.starta, self.lena, self.startb, self.lenb = m.groups()
1117 if self.lena is None:
1132 if self.lena is None:
1118 self.lena = 1
1133 self.lena = 1
1119 else:
1134 else:
1120 self.lena = int(self.lena)
1135 self.lena = int(self.lena)
1121 if self.lenb is None:
1136 if self.lenb is None:
1122 self.lenb = 1
1137 self.lenb = 1
1123 else:
1138 else:
1124 self.lenb = int(self.lenb)
1139 self.lenb = int(self.lenb)
1125 self.starta = int(self.starta)
1140 self.starta = int(self.starta)
1126 self.startb = int(self.startb)
1141 self.startb = int(self.startb)
1127 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1142 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1128 self.b)
1143 self.b)
1129 # if we hit eof before finishing out the hunk, the last line will
1144 # if we hit eof before finishing out the hunk, the last line will
1130 # be zero length. Let's try to fix it up.
1145 # be zero length. Let's try to fix it up.
1131 while len(self.hunk[-1]) == 0:
1146 while len(self.hunk[-1]) == 0:
1132 del self.hunk[-1]
1147 del self.hunk[-1]
1133 del self.a[-1]
1148 del self.a[-1]
1134 del self.b[-1]
1149 del self.b[-1]
1135 self.lena -= 1
1150 self.lena -= 1
1136 self.lenb -= 1
1151 self.lenb -= 1
1137 self._fixnewline(lr)
1152 self._fixnewline(lr)
1138
1153
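# Illustrative sketch (editor addition, not upstream code): read_unified_hunk()
# above expects a header of the form '@@ -start[,len] +start[,len] @@'.  The
# 'unidesc' regex it matches against is defined elsewhere in this file; the
# pattern below is only an approximation of it:
_example_unified_header = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
# _example_unified_header.match('@@ -12,7 +12,9 @@').groups()
#   -> ('12', '7', '12', '9'); a missing length defaults to 1 in the code above.
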
1139 def read_context_hunk(self, lr):
1154 def read_context_hunk(self, lr):
1140 self.desc = lr.readline()
1155 self.desc = lr.readline()
1141 m = contextdesc.match(self.desc)
1156 m = contextdesc.match(self.desc)
1142 if not m:
1157 if not m:
1143 raise PatchError(_("bad hunk #%d") % self.number)
1158 raise PatchError(_("bad hunk #%d") % self.number)
1144 self.starta, aend = m.groups()
1159 self.starta, aend = m.groups()
1145 self.starta = int(self.starta)
1160 self.starta = int(self.starta)
1146 if aend is None:
1161 if aend is None:
1147 aend = self.starta
1162 aend = self.starta
1148 self.lena = int(aend) - self.starta
1163 self.lena = int(aend) - self.starta
1149 if self.starta:
1164 if self.starta:
1150 self.lena += 1
1165 self.lena += 1
1151 for x in xrange(self.lena):
1166 for x in xrange(self.lena):
1152 l = lr.readline()
1167 l = lr.readline()
1153 if l.startswith('---'):
1168 if l.startswith('---'):
1154 # lines addition, old block is empty
1169 # lines addition, old block is empty
1155 lr.push(l)
1170 lr.push(l)
1156 break
1171 break
1157 s = l[2:]
1172 s = l[2:]
1158 if l.startswith('- ') or l.startswith('! '):
1173 if l.startswith('- ') or l.startswith('! '):
1159 u = '-' + s
1174 u = '-' + s
1160 elif l.startswith(' '):
1175 elif l.startswith(' '):
1161 u = ' ' + s
1176 u = ' ' + s
1162 else:
1177 else:
1163 raise PatchError(_("bad hunk #%d old text line %d") %
1178 raise PatchError(_("bad hunk #%d old text line %d") %
1164 (self.number, x))
1179 (self.number, x))
1165 self.a.append(u)
1180 self.a.append(u)
1166 self.hunk.append(u)
1181 self.hunk.append(u)
1167
1182
1168 l = lr.readline()
1183 l = lr.readline()
1169 if l.startswith('\ '):
1184 if l.startswith('\ '):
1170 s = self.a[-1][:-1]
1185 s = self.a[-1][:-1]
1171 self.a[-1] = s
1186 self.a[-1] = s
1172 self.hunk[-1] = s
1187 self.hunk[-1] = s
1173 l = lr.readline()
1188 l = lr.readline()
1174 m = contextdesc.match(l)
1189 m = contextdesc.match(l)
1175 if not m:
1190 if not m:
1176 raise PatchError(_("bad hunk #%d") % self.number)
1191 raise PatchError(_("bad hunk #%d") % self.number)
1177 self.startb, bend = m.groups()
1192 self.startb, bend = m.groups()
1178 self.startb = int(self.startb)
1193 self.startb = int(self.startb)
1179 if bend is None:
1194 if bend is None:
1180 bend = self.startb
1195 bend = self.startb
1181 self.lenb = int(bend) - self.startb
1196 self.lenb = int(bend) - self.startb
1182 if self.startb:
1197 if self.startb:
1183 self.lenb += 1
1198 self.lenb += 1
1184 hunki = 1
1199 hunki = 1
1185 for x in xrange(self.lenb):
1200 for x in xrange(self.lenb):
1186 l = lr.readline()
1201 l = lr.readline()
1187 if l.startswith('\ '):
1202 if l.startswith('\ '):
1188 # XXX: the only way to hit this is with an invalid line range.
1203 # XXX: the only way to hit this is with an invalid line range.
1189 # The no-eol marker is not counted in the line range, but I
1204 # The no-eol marker is not counted in the line range, but I
1190 # guess there are diff(1) out there which behave differently.
1205 # guess there are diff(1) out there which behave differently.
1191 s = self.b[-1][:-1]
1206 s = self.b[-1][:-1]
1192 self.b[-1] = s
1207 self.b[-1] = s
1193 self.hunk[hunki - 1] = s
1208 self.hunk[hunki - 1] = s
1194 continue
1209 continue
1195 if not l:
1210 if not l:
1196 # line deletions, new block is empty and we hit EOF
1211 # line deletions, new block is empty and we hit EOF
1197 lr.push(l)
1212 lr.push(l)
1198 break
1213 break
1199 s = l[2:]
1214 s = l[2:]
1200 if l.startswith('+ ') or l.startswith('! '):
1215 if l.startswith('+ ') or l.startswith('! '):
1201 u = '+' + s
1216 u = '+' + s
1202 elif l.startswith(' '):
1217 elif l.startswith(' '):
1203 u = ' ' + s
1218 u = ' ' + s
1204 elif len(self.b) == 0:
1219 elif len(self.b) == 0:
1205 # line deletions, new block is empty
1220 # line deletions, new block is empty
1206 lr.push(l)
1221 lr.push(l)
1207 break
1222 break
1208 else:
1223 else:
1209 raise PatchError(_("bad hunk #%d new text line %d") %
1224 raise PatchError(_("bad hunk #%d new text line %d") %
1210 (self.number, x))
1225 (self.number, x))
1211 self.b.append(s)
1226 self.b.append(s)
1212 while True:
1227 while True:
1213 if hunki >= len(self.hunk):
1228 if hunki >= len(self.hunk):
1214 h = ""
1229 h = ""
1215 else:
1230 else:
1216 h = self.hunk[hunki]
1231 h = self.hunk[hunki]
1217 hunki += 1
1232 hunki += 1
1218 if h == u:
1233 if h == u:
1219 break
1234 break
1220 elif h.startswith('-'):
1235 elif h.startswith('-'):
1221 continue
1236 continue
1222 else:
1237 else:
1223 self.hunk.insert(hunki - 1, u)
1238 self.hunk.insert(hunki - 1, u)
1224 break
1239 break
1225
1240
1226 if not self.a:
1241 if not self.a:
1227 # this happens when lines were only added to the hunk
1242 # this happens when lines were only added to the hunk
1228 for x in self.hunk:
1243 for x in self.hunk:
1229 if x.startswith('-') or x.startswith(' '):
1244 if x.startswith('-') or x.startswith(' '):
1230 self.a.append(x)
1245 self.a.append(x)
1231 if not self.b:
1246 if not self.b:
1232 # this happens when lines were only deleted from the hunk
1247 # this happens when lines were only deleted from the hunk
1233 for x in self.hunk:
1248 for x in self.hunk:
1234 if x.startswith('+') or x.startswith(' '):
1249 if x.startswith('+') or x.startswith(' '):
1235 self.b.append(x[1:])
1250 self.b.append(x[1:])
1236 # @@ -start,len +start,len @@
1251 # @@ -start,len +start,len @@
1237 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1252 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1238 self.startb, self.lenb)
1253 self.startb, self.lenb)
1239 self.hunk[0] = self.desc
1254 self.hunk[0] = self.desc
1240 self._fixnewline(lr)
1255 self._fixnewline(lr)
1241
1256
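# Illustrative sketch (editor addition, not upstream code): the old-style
# context hunk that read_context_hunk() above consumes looks like this
# (sample content only):
_example_context_hunk = (
    "***************\n"
    "*** 1,3 ****\n"
    "  one\n"
    "! two\n"
    "  three\n"
    "--- 1,4 ----\n"
    "  one\n"
    "! TWO\n"
    "+ two-and-a-half\n"
    "  three\n"
)
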
1242 def _fixnewline(self, lr):
1257 def _fixnewline(self, lr):
1243 l = lr.readline()
1258 l = lr.readline()
1244 if l.startswith('\ '):
1259 if l.startswith('\ '):
1245 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1260 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1246 else:
1261 else:
1247 lr.push(l)
1262 lr.push(l)
1248
1263
1249 def complete(self):
1264 def complete(self):
1250 return len(self.a) == self.lena and len(self.b) == self.lenb
1265 return len(self.a) == self.lena and len(self.b) == self.lenb
1251
1266
1252 def _fuzzit(self, old, new, fuzz, toponly):
1267 def _fuzzit(self, old, new, fuzz, toponly):
1253 # this removes context lines from the top and bottom of the 'old' and
1268 # this removes context lines from the top and bottom of the 'old' and
1254 # 'new' lists. It checks the hunk to make sure only context lines are
1269 # 'new' lists. It checks the hunk to make sure only context lines are
1255 # removed, and then returns the shortened lists and the top offset.
1270 # removed, and then returns the shortened lists and the top offset.
1256 fuzz = min(fuzz, len(old))
1271 fuzz = min(fuzz, len(old))
1257 if fuzz:
1272 if fuzz:
1258 top = 0
1273 top = 0
1259 bot = 0
1274 bot = 0
1260 hlen = len(self.hunk)
1275 hlen = len(self.hunk)
1261 for x in xrange(hlen - 1):
1276 for x in xrange(hlen - 1):
1262 # the hunk starts with the @@ line, so use x+1
1277 # the hunk starts with the @@ line, so use x+1
1263 if self.hunk[x + 1][0] == ' ':
1278 if self.hunk[x + 1][0] == ' ':
1264 top += 1
1279 top += 1
1265 else:
1280 else:
1266 break
1281 break
1267 if not toponly:
1282 if not toponly:
1268 for x in xrange(hlen - 1):
1283 for x in xrange(hlen - 1):
1269 if self.hunk[hlen - bot - 1][0] == ' ':
1284 if self.hunk[hlen - bot - 1][0] == ' ':
1270 bot += 1
1285 bot += 1
1271 else:
1286 else:
1272 break
1287 break
1273
1288
1274 bot = min(fuzz, bot)
1289 bot = min(fuzz, bot)
1275 top = min(fuzz, top)
1290 top = min(fuzz, top)
1276 return old[top:len(old) - bot], new[top:len(new) - bot], top
1291 return old[top:len(old) - bot], new[top:len(new) - bot], top
1277 return old, new, 0
1292 return old, new, 0
1278
1293
1279 def fuzzit(self, fuzz, toponly):
1294 def fuzzit(self, fuzz, toponly):
1280 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1295 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1281 oldstart = self.starta + top
1296 oldstart = self.starta + top
1282 newstart = self.startb + top
1297 newstart = self.startb + top
1283 # zero length hunk ranges already have their start decremented
1298 # zero length hunk ranges already have their start decremented
1284 if self.lena and oldstart > 0:
1299 if self.lena and oldstart > 0:
1285 oldstart -= 1
1300 oldstart -= 1
1286 if self.lenb and newstart > 0:
1301 if self.lenb and newstart > 0:
1287 newstart -= 1
1302 newstart -= 1
1288 return old, oldstart, new, newstart
1303 return old, oldstart, new, newstart
1289
1304
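# Illustrative sketch (editor addition, not upstream code): the idea behind
# the fuzzing above, written out on plain lists.  hunklines[0] is the
# '@@'/'***' description line; at most 'fuzz' context lines are dropped from
# each end of the old/new line lists:
def _example_fuzz_trim(old, new, hunklines, fuzz):
    top = 0
    for line in hunklines[1:]:
        if line.startswith(' '):
            top += 1
        else:
            break
    bot = 0
    for line in reversed(hunklines[1:]):
        if line.startswith(' '):
            bot += 1
        else:
            break
    top, bot = min(fuzz, top), min(fuzz, bot)
    return old[top:len(old) - bot], new[top:len(new) - bot], top
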
1290 class binhunk(object):
1305 class binhunk(object):
1291 'A binary patch file.'
1306 'A binary patch file.'
1292 def __init__(self, lr, fname):
1307 def __init__(self, lr, fname):
1293 self.text = None
1308 self.text = None
1294 self.delta = False
1309 self.delta = False
1295 self.hunk = ['GIT binary patch\n']
1310 self.hunk = ['GIT binary patch\n']
1296 self._fname = fname
1311 self._fname = fname
1297 self._read(lr)
1312 self._read(lr)
1298
1313
1299 def complete(self):
1314 def complete(self):
1300 return self.text is not None
1315 return self.text is not None
1301
1316
1302 def new(self, lines):
1317 def new(self, lines):
1303 if self.delta:
1318 if self.delta:
1304 return [applybindelta(self.text, ''.join(lines))]
1319 return [applybindelta(self.text, ''.join(lines))]
1305 return [self.text]
1320 return [self.text]
1306
1321
1307 def _read(self, lr):
1322 def _read(self, lr):
1308 def getline(lr, hunk):
1323 def getline(lr, hunk):
1309 l = lr.readline()
1324 l = lr.readline()
1310 hunk.append(l)
1325 hunk.append(l)
1311 return l.rstrip('\r\n')
1326 return l.rstrip('\r\n')
1312
1327
1313 size = 0
1328 size = 0
1314 while True:
1329 while True:
1315 line = getline(lr, self.hunk)
1330 line = getline(lr, self.hunk)
1316 if not line:
1331 if not line:
1317 raise PatchError(_('could not extract "%s" binary data')
1332 raise PatchError(_('could not extract "%s" binary data')
1318 % self._fname)
1333 % self._fname)
1319 if line.startswith('literal '):
1334 if line.startswith('literal '):
1320 size = int(line[8:].rstrip())
1335 size = int(line[8:].rstrip())
1321 break
1336 break
1322 if line.startswith('delta '):
1337 if line.startswith('delta '):
1323 size = int(line[6:].rstrip())
1338 size = int(line[6:].rstrip())
1324 self.delta = True
1339 self.delta = True
1325 break
1340 break
1326 dec = []
1341 dec = []
1327 line = getline(lr, self.hunk)
1342 line = getline(lr, self.hunk)
1328 while len(line) > 1:
1343 while len(line) > 1:
1329 l = line[0]
1344 l = line[0]
1330 if l <= 'Z' and l >= 'A':
1345 if l <= 'Z' and l >= 'A':
1331 l = ord(l) - ord('A') + 1
1346 l = ord(l) - ord('A') + 1
1332 else:
1347 else:
1333 l = ord(l) - ord('a') + 27
1348 l = ord(l) - ord('a') + 27
1334 try:
1349 try:
1335 dec.append(base85.b85decode(line[1:])[:l])
1350 dec.append(base85.b85decode(line[1:])[:l])
1336 except ValueError, e:
1351 except ValueError, e:
1337 raise PatchError(_('could not decode "%s" binary patch: %s')
1352 raise PatchError(_('could not decode "%s" binary patch: %s')
1338 % (self._fname, str(e)))
1353 % (self._fname, str(e)))
1339 line = getline(lr, self.hunk)
1354 line = getline(lr, self.hunk)
1340 text = zlib.decompress(''.join(dec))
1355 text = zlib.decompress(''.join(dec))
1341 if len(text) != size:
1356 if len(text) != size:
1342 raise PatchError(_('"%s" length is %d bytes, should be %d')
1357 raise PatchError(_('"%s" length is %d bytes, should be %d')
1343 % (self._fname, len(text), size))
1358 % (self._fname, len(text), size))
1344 self.text = text
1359 self.text = text
1345
1360
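# Illustrative sketch (editor addition, not upstream code): in the 'GIT
# binary patch' format read above, the first character of every base85 line
# encodes how many decoded bytes that line carries ('A'..'Z' -> 1..26,
# 'a'..'z' -> 27..52):
def _example_binary_linelen(c):
    if 'A' <= c <= 'Z':
        return ord(c) - ord('A') + 1
    return ord(c) - ord('a') + 27
# _example_binary_linelen('M') -> 13, _example_binary_linelen('z') -> 52
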
1346 def parsefilename(str):
1361 def parsefilename(str):
1347 # --- filename \t|space stuff
1362 # --- filename \t|space stuff
1348 s = str[4:].rstrip('\r\n')
1363 s = str[4:].rstrip('\r\n')
1349 i = s.find('\t')
1364 i = s.find('\t')
1350 if i < 0:
1365 if i < 0:
1351 i = s.find(' ')
1366 i = s.find(' ')
1352 if i < 0:
1367 if i < 0:
1353 return s
1368 return s
1354 return s[:i]
1369 return s[:i]
1355
1370
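# Illustrative sketch (editor addition, not upstream code): what
# parsefilename() above extracts from typical '---'/'+++' lines:
def _example_parsefilename():
    return (parsefilename('--- a/foo.c\tThu Jan 01 00:00:00 1970\n'),  # 'a/foo.c'
            parsefilename('+++ b/foo.c\n'))                            # 'b/foo.c'
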
1356 def parsepatch(originalchunks):
1371 def parsepatch(originalchunks):
1357 """patch -> [] of headers -> [] of hunks """
1372 """patch -> [] of headers -> [] of hunks """
1358 class parser(object):
1373 class parser(object):
1359 """patch parsing state machine"""
1374 """patch parsing state machine"""
1360 def __init__(self):
1375 def __init__(self):
1361 self.fromline = 0
1376 self.fromline = 0
1362 self.toline = 0
1377 self.toline = 0
1363 self.proc = ''
1378 self.proc = ''
1364 self.header = None
1379 self.header = None
1365 self.context = []
1380 self.context = []
1366 self.before = []
1381 self.before = []
1367 self.hunk = []
1382 self.hunk = []
1368 self.headers = []
1383 self.headers = []
1369
1384
1370 def addrange(self, limits):
1385 def addrange(self, limits):
1371 fromstart, fromend, tostart, toend, proc = limits
1386 fromstart, fromend, tostart, toend, proc = limits
1372 self.fromline = int(fromstart)
1387 self.fromline = int(fromstart)
1373 self.toline = int(tostart)
1388 self.toline = int(tostart)
1374 self.proc = proc
1389 self.proc = proc
1375
1390
1376 def addcontext(self, context):
1391 def addcontext(self, context):
1377 if self.hunk:
1392 if self.hunk:
1378 h = recordhunk(self.header, self.fromline, self.toline,
1393 h = recordhunk(self.header, self.fromline, self.toline,
1379 self.proc, self.before, self.hunk, context)
1394 self.proc, self.before, self.hunk, context)
1380 self.header.hunks.append(h)
1395 self.header.hunks.append(h)
1381 self.fromline += len(self.before) + h.removed
1396 self.fromline += len(self.before) + h.removed
1382 self.toline += len(self.before) + h.added
1397 self.toline += len(self.before) + h.added
1383 self.before = []
1398 self.before = []
1384 self.hunk = []
1399 self.hunk = []
1385 self.proc = ''
1400 self.proc = ''
1386 self.context = context
1401 self.context = context
1387
1402
1388 def addhunk(self, hunk):
1403 def addhunk(self, hunk):
1389 if self.context:
1404 if self.context:
1390 self.before = self.context
1405 self.before = self.context
1391 self.context = []
1406 self.context = []
1392 self.hunk = hunk
1407 self.hunk = hunk
1393
1408
1394 def newfile(self, hdr):
1409 def newfile(self, hdr):
1395 self.addcontext([])
1410 self.addcontext([])
1396 h = header(hdr)
1411 h = header(hdr)
1397 self.headers.append(h)
1412 self.headers.append(h)
1398 self.header = h
1413 self.header = h
1399
1414
1400 def addother(self, line):
1415 def addother(self, line):
1401 pass # 'other' lines are ignored
1416 pass # 'other' lines are ignored
1402
1417
1403 def finished(self):
1418 def finished(self):
1404 self.addcontext([])
1419 self.addcontext([])
1405 return self.headers
1420 return self.headers
1406
1421
1407 transitions = {
1422 transitions = {
1408 'file': {'context': addcontext,
1423 'file': {'context': addcontext,
1409 'file': newfile,
1424 'file': newfile,
1410 'hunk': addhunk,
1425 'hunk': addhunk,
1411 'range': addrange},
1426 'range': addrange},
1412 'context': {'file': newfile,
1427 'context': {'file': newfile,
1413 'hunk': addhunk,
1428 'hunk': addhunk,
1414 'range': addrange,
1429 'range': addrange,
1415 'other': addother},
1430 'other': addother},
1416 'hunk': {'context': addcontext,
1431 'hunk': {'context': addcontext,
1417 'file': newfile,
1432 'file': newfile,
1418 'range': addrange},
1433 'range': addrange},
1419 'range': {'context': addcontext,
1434 'range': {'context': addcontext,
1420 'hunk': addhunk},
1435 'hunk': addhunk},
1421 'other': {'other': addother},
1436 'other': {'other': addother},
1422 }
1437 }
1423
1438
1424 p = parser()
1439 p = parser()
1425 fp = cStringIO.StringIO()
1440 fp = cStringIO.StringIO()
1426 fp.write(''.join(originalchunks))
1441 fp.write(''.join(originalchunks))
1427 fp.seek(0)
1442 fp.seek(0)
1428
1443
1429 state = 'context'
1444 state = 'context'
1430 for newstate, data in scanpatch(fp):
1445 for newstate, data in scanpatch(fp):
1431 try:
1446 try:
1432 p.transitions[state][newstate](p, data)
1447 p.transitions[state][newstate](p, data)
1433 except KeyError:
1448 except KeyError:
1434 raise PatchError('unhandled transition: %s -> %s' %
1449 raise PatchError('unhandled transition: %s -> %s' %
1435 (state, newstate))
1450 (state, newstate))
1436 state = newstate
1451 state = newstate
1437 del fp
1452 del fp
1438 return p.finished()
1453 return p.finished()
1439
1454
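# Illustrative sketch (editor addition, not upstream code): running a minimal
# unified diff through parsepatch() above; the input is a list of lines such
# as a caller would read from a patch file:
def _example_parsepatch():
    lines = [
        'diff -r 0123456789ab a.txt\n',
        '--- a/a.txt\n',
        '+++ b/a.txt\n',
        '@@ -1,1 +1,2 @@\n',
        ' first line\n',
        '+second line\n',
    ]
    headers = parsepatch(lines)
    # expected: one header for a.txt carrying a single hunk
    return [(h.files(), len(h.hunks)) for h in headers]
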
1440 def pathtransform(path, strip, prefix):
1455 def pathtransform(path, strip, prefix):
1441 '''turn a path from a patch into a path suitable for the repository
1456 '''turn a path from a patch into a path suitable for the repository
1442
1457
1443 prefix, if not empty, is expected to be normalized with a / at the end.
1458 prefix, if not empty, is expected to be normalized with a / at the end.
1444
1459
1445 Returns (stripped components, path in repository).
1460 Returns (stripped components, path in repository).
1446
1461
1447 >>> pathtransform('a/b/c', 0, '')
1462 >>> pathtransform('a/b/c', 0, '')
1448 ('', 'a/b/c')
1463 ('', 'a/b/c')
1449 >>> pathtransform(' a/b/c ', 0, '')
1464 >>> pathtransform(' a/b/c ', 0, '')
1450 ('', ' a/b/c')
1465 ('', ' a/b/c')
1451 >>> pathtransform(' a/b/c ', 2, '')
1466 >>> pathtransform(' a/b/c ', 2, '')
1452 ('a/b/', 'c')
1467 ('a/b/', 'c')
1453 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1468 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1454 ('a//b/', 'd/e/c')
1469 ('a//b/', 'd/e/c')
1455 >>> pathtransform('a/b/c', 3, '')
1470 >>> pathtransform('a/b/c', 3, '')
1456 Traceback (most recent call last):
1471 Traceback (most recent call last):
1457 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1472 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1458 '''
1473 '''
1459 pathlen = len(path)
1474 pathlen = len(path)
1460 i = 0
1475 i = 0
1461 if strip == 0:
1476 if strip == 0:
1462 return '', path.rstrip()
1477 return '', path.rstrip()
1463 count = strip
1478 count = strip
1464 while count > 0:
1479 while count > 0:
1465 i = path.find('/', i)
1480 i = path.find('/', i)
1466 if i == -1:
1481 if i == -1:
1467 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1482 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1468 (count, strip, path))
1483 (count, strip, path))
1469 i += 1
1484 i += 1
1470 # consume '//' in the path
1485 # consume '//' in the path
1471 while i < pathlen - 1 and path[i] == '/':
1486 while i < pathlen - 1 and path[i] == '/':
1472 i += 1
1487 i += 1
1473 count -= 1
1488 count -= 1
1474 return path[:i].lstrip(), prefix + path[i:].rstrip()
1489 return path[:i].lstrip(), prefix + path[i:].rstrip()
1475
1490
1476 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1491 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1477 nulla = afile_orig == "/dev/null"
1492 nulla = afile_orig == "/dev/null"
1478 nullb = bfile_orig == "/dev/null"
1493 nullb = bfile_orig == "/dev/null"
1479 create = nulla and hunk.starta == 0 and hunk.lena == 0
1494 create = nulla and hunk.starta == 0 and hunk.lena == 0
1480 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1495 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1481 abase, afile = pathtransform(afile_orig, strip, prefix)
1496 abase, afile = pathtransform(afile_orig, strip, prefix)
1482 gooda = not nulla and backend.exists(afile)
1497 gooda = not nulla and backend.exists(afile)
1483 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1498 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1484 if afile == bfile:
1499 if afile == bfile:
1485 goodb = gooda
1500 goodb = gooda
1486 else:
1501 else:
1487 goodb = not nullb and backend.exists(bfile)
1502 goodb = not nullb and backend.exists(bfile)
1488 missing = not goodb and not gooda and not create
1503 missing = not goodb and not gooda and not create
1489
1504
1490 # some diff programs apparently produce patches where the afile is
1505 # some diff programs apparently produce patches where the afile is
1491 # not /dev/null, but afile starts with bfile
1506 # not /dev/null, but afile starts with bfile
1492 abasedir = afile[:afile.rfind('/') + 1]
1507 abasedir = afile[:afile.rfind('/') + 1]
1493 bbasedir = bfile[:bfile.rfind('/') + 1]
1508 bbasedir = bfile[:bfile.rfind('/') + 1]
1494 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1509 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1495 and hunk.starta == 0 and hunk.lena == 0):
1510 and hunk.starta == 0 and hunk.lena == 0):
1496 create = True
1511 create = True
1497 missing = False
1512 missing = False
1498
1513
1499 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1514 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1500 # diff is between a file and its backup. In this case, the original
1515 # diff is between a file and its backup. In this case, the original
1501 # file should be patched (see original mpatch code).
1516 # file should be patched (see original mpatch code).
1502 isbackup = (abase == bbase and bfile.startswith(afile))
1517 isbackup = (abase == bbase and bfile.startswith(afile))
1503 fname = None
1518 fname = None
1504 if not missing:
1519 if not missing:
1505 if gooda and goodb:
1520 if gooda and goodb:
1506 if isbackup:
1521 if isbackup:
1507 fname = afile
1522 fname = afile
1508 else:
1523 else:
1509 fname = bfile
1524 fname = bfile
1510 elif gooda:
1525 elif gooda:
1511 fname = afile
1526 fname = afile
1512
1527
1513 if not fname:
1528 if not fname:
1514 if not nullb:
1529 if not nullb:
1515 if isbackup:
1530 if isbackup:
1516 fname = afile
1531 fname = afile
1517 else:
1532 else:
1518 fname = bfile
1533 fname = bfile
1519 elif not nulla:
1534 elif not nulla:
1520 fname = afile
1535 fname = afile
1521 else:
1536 else:
1522 raise PatchError(_("undefined source and destination files"))
1537 raise PatchError(_("undefined source and destination files"))
1523
1538
1524 gp = patchmeta(fname)
1539 gp = patchmeta(fname)
1525 if create:
1540 if create:
1526 gp.op = 'ADD'
1541 gp.op = 'ADD'
1527 elif remove:
1542 elif remove:
1528 gp.op = 'DELETE'
1543 gp.op = 'DELETE'
1529 return gp
1544 return gp
1530
1545
1531 def scanpatch(fp):
1546 def scanpatch(fp):
1532 """like patch.iterhunks, but yield different events
1547 """like patch.iterhunks, but yield different events
1533
1548
1534 - ('file', [header_lines + fromfile + tofile])
1549 - ('file', [header_lines + fromfile + tofile])
1535 - ('context', [context_lines])
1550 - ('context', [context_lines])
1536 - ('hunk', [hunk_lines])
1551 - ('hunk', [hunk_lines])
1537 - ('range', (-start,len, +start,len, proc))
1552 - ('range', (-start,len, +start,len, proc))
1538 """
1553 """
1539 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1554 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1540 lr = linereader(fp)
1555 lr = linereader(fp)
1541
1556
1542 def scanwhile(first, p):
1557 def scanwhile(first, p):
1543 """scan lr while predicate holds"""
1558 """scan lr while predicate holds"""
1544 lines = [first]
1559 lines = [first]
1545 while True:
1560 while True:
1546 line = lr.readline()
1561 line = lr.readline()
1547 if not line:
1562 if not line:
1548 break
1563 break
1549 if p(line):
1564 if p(line):
1550 lines.append(line)
1565 lines.append(line)
1551 else:
1566 else:
1552 lr.push(line)
1567 lr.push(line)
1553 break
1568 break
1554 return lines
1569 return lines
1555
1570
1556 while True:
1571 while True:
1557 line = lr.readline()
1572 line = lr.readline()
1558 if not line:
1573 if not line:
1559 break
1574 break
1560 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1575 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1561 def notheader(line):
1576 def notheader(line):
1562 s = line.split(None, 1)
1577 s = line.split(None, 1)
1563 return not s or s[0] not in ('---', 'diff')
1578 return not s or s[0] not in ('---', 'diff')
1564 header = scanwhile(line, notheader)
1579 header = scanwhile(line, notheader)
1565 fromfile = lr.readline()
1580 fromfile = lr.readline()
1566 if fromfile.startswith('---'):
1581 if fromfile.startswith('---'):
1567 tofile = lr.readline()
1582 tofile = lr.readline()
1568 header += [fromfile, tofile]
1583 header += [fromfile, tofile]
1569 else:
1584 else:
1570 lr.push(fromfile)
1585 lr.push(fromfile)
1571 yield 'file', header
1586 yield 'file', header
1572 elif line[0] == ' ':
1587 elif line[0] == ' ':
1573 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1588 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1574 elif line[0] in '-+':
1589 elif line[0] in '-+':
1575 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1590 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1576 else:
1591 else:
1577 m = lines_re.match(line)
1592 m = lines_re.match(line)
1578 if m:
1593 if m:
1579 yield 'range', m.groups()
1594 yield 'range', m.groups()
1580 else:
1595 else:
1581 yield 'other', line
1596 yield 'other', line
1582
1597
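# Illustrative sketch (editor addition, not upstream code): the event stream
# scanpatch() above produces for the same minimal diff used in the
# parsepatch() sketch earlier:
def _example_scanpatch_events():
    fp = cStringIO.StringIO(
        'diff -r 0123456789ab a.txt\n'
        '--- a/a.txt\n'
        '+++ b/a.txt\n'
        '@@ -1,1 +1,2 @@\n'
        ' first line\n'
        '+second line\n')
    # expected: ['file', 'range', 'context', 'hunk']
    return [event for event, data in scanpatch(fp)]
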
1583 def scangitpatch(lr, firstline):
1598 def scangitpatch(lr, firstline):
1584 """
1599 """
1585 Git patches can emit:
1600 Git patches can emit:
1586 - rename a to b
1601 - rename a to b
1587 - change b
1602 - change b
1588 - copy a to c
1603 - copy a to c
1589 - change c
1604 - change c
1590
1605
1591 We cannot apply this sequence as-is: the renamed 'a' could not be
1606 We cannot apply this sequence as-is: the renamed 'a' could not be
1592 found, because it would already have been renamed. And we cannot copy
1607 found, because it would already have been renamed. And we cannot copy
1593 from 'b' instead because 'b' would have been changed already. So
1608 from 'b' instead because 'b' would have been changed already. So
1594 we scan the git patch for copy and rename commands so we can
1609 we scan the git patch for copy and rename commands so we can
1595 perform the copies ahead of time.
1610 perform the copies ahead of time.
1596 """
1611 """
1597 pos = 0
1612 pos = 0
1598 try:
1613 try:
1599 pos = lr.fp.tell()
1614 pos = lr.fp.tell()
1600 fp = lr.fp
1615 fp = lr.fp
1601 except IOError:
1616 except IOError:
1602 fp = cStringIO.StringIO(lr.fp.read())
1617 fp = cStringIO.StringIO(lr.fp.read())
1603 gitlr = linereader(fp)
1618 gitlr = linereader(fp)
1604 gitlr.push(firstline)
1619 gitlr.push(firstline)
1605 gitpatches = readgitpatch(gitlr)
1620 gitpatches = readgitpatch(gitlr)
1606 fp.seek(pos)
1621 fp.seek(pos)
1607 return gitpatches
1622 return gitpatches
1608
1623
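# Illustrative sketch (editor addition, not upstream code): the kind of git
# patch ordering the docstring above describes, where 'a' is renamed before
# a later copy still refers to it (sample content only):
_example_git_rename_then_copy = (
    "diff --git a/a b/b\n"
    "rename from a\n"
    "rename to b\n"
    "diff --git a/a b/c\n"
    "copy from a\n"
    "copy to c\n"
)
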
1609 def iterhunks(fp):
1624 def iterhunks(fp):
1610 """Read a patch and yield the following events:
1625 """Read a patch and yield the following events:
1611 - ("file", afile, bfile, firsthunk): select a new target file.
1626 - ("file", afile, bfile, firsthunk): select a new target file.
1612 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1627 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1613 "file" event.
1628 "file" event.
1614 - ("git", gitchanges): current diff is in git format, gitchanges
1629 - ("git", gitchanges): current diff is in git format, gitchanges
1615 maps filenames to gitpatch records. Unique event.
1630 maps filenames to gitpatch records. Unique event.
1616 """
1631 """
1617 afile = ""
1632 afile = ""
1618 bfile = ""
1633 bfile = ""
1619 state = None
1634 state = None
1620 hunknum = 0
1635 hunknum = 0
1621 emitfile = newfile = False
1636 emitfile = newfile = False
1622 gitpatches = None
1637 gitpatches = None
1623
1638
1624 # our states
1639 # our states
1625 BFILE = 1
1640 BFILE = 1
1626 context = None
1641 context = None
1627 lr = linereader(fp)
1642 lr = linereader(fp)
1628
1643
1629 while True:
1644 while True:
1630 x = lr.readline()
1645 x = lr.readline()
1631 if not x:
1646 if not x:
1632 break
1647 break
1633 if state == BFILE and (
1648 if state == BFILE and (
1634 (not context and x[0] == '@')
1649 (not context and x[0] == '@')
1635 or (context is not False and x.startswith('***************'))
1650 or (context is not False and x.startswith('***************'))
1636 or x.startswith('GIT binary patch')):
1651 or x.startswith('GIT binary patch')):
1637 gp = None
1652 gp = None
1638 if (gitpatches and
1653 if (gitpatches and
1639 gitpatches[-1].ispatching(afile, bfile)):
1654 gitpatches[-1].ispatching(afile, bfile)):
1640 gp = gitpatches.pop()
1655 gp = gitpatches.pop()
1641 if x.startswith('GIT binary patch'):
1656 if x.startswith('GIT binary patch'):
1642 h = binhunk(lr, gp.path)
1657 h = binhunk(lr, gp.path)
1643 else:
1658 else:
1644 if context is None and x.startswith('***************'):
1659 if context is None and x.startswith('***************'):
1645 context = True
1660 context = True
1646 h = hunk(x, hunknum + 1, lr, context)
1661 h = hunk(x, hunknum + 1, lr, context)
1647 hunknum += 1
1662 hunknum += 1
1648 if emitfile:
1663 if emitfile:
1649 emitfile = False
1664 emitfile = False
1650 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1665 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1651 yield 'hunk', h
1666 yield 'hunk', h
1652 elif x.startswith('diff --git a/'):
1667 elif x.startswith('diff --git a/'):
1653 m = gitre.match(x.rstrip(' \r\n'))
1668 m = gitre.match(x.rstrip(' \r\n'))
1654 if not m:
1669 if not m:
1655 continue
1670 continue
1656 if gitpatches is None:
1671 if gitpatches is None:
1657 # scan whole input for git metadata
1672 # scan whole input for git metadata
1658 gitpatches = scangitpatch(lr, x)
1673 gitpatches = scangitpatch(lr, x)
1659 yield 'git', [g.copy() for g in gitpatches
1674 yield 'git', [g.copy() for g in gitpatches
1660 if g.op in ('COPY', 'RENAME')]
1675 if g.op in ('COPY', 'RENAME')]
1661 gitpatches.reverse()
1676 gitpatches.reverse()
1662 afile = 'a/' + m.group(1)
1677 afile = 'a/' + m.group(1)
1663 bfile = 'b/' + m.group(2)
1678 bfile = 'b/' + m.group(2)
1664 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1679 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1665 gp = gitpatches.pop()
1680 gp = gitpatches.pop()
1666 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1681 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1667 if not gitpatches:
1682 if not gitpatches:
1668 raise PatchError(_('failed to synchronize metadata for "%s"')
1683 raise PatchError(_('failed to synchronize metadata for "%s"')
1669 % afile[2:])
1684 % afile[2:])
1670 gp = gitpatches[-1]
1685 gp = gitpatches[-1]
1671 newfile = True
1686 newfile = True
1672 elif x.startswith('---'):
1687 elif x.startswith('---'):
1673 # check for a unified diff
1688 # check for a unified diff
1674 l2 = lr.readline()
1689 l2 = lr.readline()
1675 if not l2.startswith('+++'):
1690 if not l2.startswith('+++'):
1676 lr.push(l2)
1691 lr.push(l2)
1677 continue
1692 continue
1678 newfile = True
1693 newfile = True
1679 context = False
1694 context = False
1680 afile = parsefilename(x)
1695 afile = parsefilename(x)
1681 bfile = parsefilename(l2)
1696 bfile = parsefilename(l2)
1682 elif x.startswith('***'):
1697 elif x.startswith('***'):
1683 # check for a context diff
1698 # check for a context diff
1684 l2 = lr.readline()
1699 l2 = lr.readline()
1685 if not l2.startswith('---'):
1700 if not l2.startswith('---'):
1686 lr.push(l2)
1701 lr.push(l2)
1687 continue
1702 continue
1688 l3 = lr.readline()
1703 l3 = lr.readline()
1689 lr.push(l3)
1704 lr.push(l3)
1690 if not l3.startswith("***************"):
1705 if not l3.startswith("***************"):
1691 lr.push(l2)
1706 lr.push(l2)
1692 continue
1707 continue
1693 newfile = True
1708 newfile = True
1694 context = True
1709 context = True
1695 afile = parsefilename(x)
1710 afile = parsefilename(x)
1696 bfile = parsefilename(l2)
1711 bfile = parsefilename(l2)
1697
1712
1698 if newfile:
1713 if newfile:
1699 newfile = False
1714 newfile = False
1700 emitfile = True
1715 emitfile = True
1701 state = BFILE
1716 state = BFILE
1702 hunknum = 0
1717 hunknum = 0
1703
1718
1704 while gitpatches:
1719 while gitpatches:
1705 gp = gitpatches.pop()
1720 gp = gitpatches.pop()
1706 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1721 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1707
1722
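# Illustrative sketch (editor addition, not upstream code): the events
# iterhunks() above yields for a minimal unified diff:
def _example_iterhunks_events():
    fp = cStringIO.StringIO(
        '--- a/a.txt\n'
        '+++ b/a.txt\n'
        '@@ -1,1 +1,2 @@\n'
        ' first line\n'
        '+second line\n')
    # expected: ['file', 'hunk']
    return [event for event, values in iterhunks(fp)]
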
1708 def applybindelta(binchunk, data):
1723 def applybindelta(binchunk, data):
1709 """Apply a binary delta hunk
1724 """Apply a binary delta hunk
1710 The algorithm is the one used by git's patch-delta.c.
1725 The algorithm is the one used by git's patch-delta.c.
1711 """
1726 """
1712 def deltahead(binchunk):
1727 def deltahead(binchunk):
1713 i = 0
1728 i = 0
1714 for c in binchunk:
1729 for c in binchunk:
1715 i += 1
1730 i += 1
1716 if not (ord(c) & 0x80):
1731 if not (ord(c) & 0x80):
1717 return i
1732 return i
1718 return i
1733 return i
1719 out = ""
1734 out = ""
1720 s = deltahead(binchunk)
1735 s = deltahead(binchunk)
1721 binchunk = binchunk[s:]
1736 binchunk = binchunk[s:]
1722 s = deltahead(binchunk)
1737 s = deltahead(binchunk)
1723 binchunk = binchunk[s:]
1738 binchunk = binchunk[s:]
1724 i = 0
1739 i = 0
1725 while i < len(binchunk):
1740 while i < len(binchunk):
1726 cmd = ord(binchunk[i])
1741 cmd = ord(binchunk[i])
1727 i += 1
1742 i += 1
1728 if (cmd & 0x80):
1743 if (cmd & 0x80):
1729 offset = 0
1744 offset = 0
1730 size = 0
1745 size = 0
1731 if (cmd & 0x01):
1746 if (cmd & 0x01):
1732 offset = ord(binchunk[i])
1747 offset = ord(binchunk[i])
1733 i += 1
1748 i += 1
1734 if (cmd & 0x02):
1749 if (cmd & 0x02):
1735 offset |= ord(binchunk[i]) << 8
1750 offset |= ord(binchunk[i]) << 8
1736 i += 1
1751 i += 1
1737 if (cmd & 0x04):
1752 if (cmd & 0x04):
1738 offset |= ord(binchunk[i]) << 16
1753 offset |= ord(binchunk[i]) << 16
1739 i += 1
1754 i += 1
1740 if (cmd & 0x08):
1755 if (cmd & 0x08):
1741 offset |= ord(binchunk[i]) << 24
1756 offset |= ord(binchunk[i]) << 24
1742 i += 1
1757 i += 1
1743 if (cmd & 0x10):
1758 if (cmd & 0x10):
1744 size = ord(binchunk[i])
1759 size = ord(binchunk[i])
1745 i += 1
1760 i += 1
1746 if (cmd & 0x20):
1761 if (cmd & 0x20):
1747 size |= ord(binchunk[i]) << 8
1762 size |= ord(binchunk[i]) << 8
1748 i += 1
1763 i += 1
1749 if (cmd & 0x40):
1764 if (cmd & 0x40):
1750 size |= ord(binchunk[i]) << 16
1765 size |= ord(binchunk[i]) << 16
1751 i += 1
1766 i += 1
1752 if size == 0:
1767 if size == 0:
1753 size = 0x10000
1768 size = 0x10000
1754 offset_end = offset + size
1769 offset_end = offset + size
1755 out += data[offset:offset_end]
1770 out += data[offset:offset_end]
1756 elif cmd != 0:
1771 elif cmd != 0:
1757 offset_end = i + cmd
1772 offset_end = i + cmd
1758 out += binchunk[i:offset_end]
1773 out += binchunk[i:offset_end]
1759 i += cmd
1774 i += cmd
1760 else:
1775 else:
1761 raise PatchError(_('unexpected delta opcode 0'))
1776 raise PatchError(_('unexpected delta opcode 0'))
1762 return out
1777 return out
1763
1778
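# Illustrative sketch (editor addition, not upstream code): a hand-built
# delta applied with applybindelta() above.  The two leading bytes are the
# source/target size headers skipped by deltahead(); 0x91 is a copy command
# (an offset byte and a size byte follow), 0x02 is a literal insert of the
# next two raw bytes:
def _example_applybindelta():
    data = 'hello'
    delta = '\x05\x05' + '\x91\x01\x03' + '\x02XY'
    return applybindelta(delta, data)  # -> 'ellXY' (data[1:4] + 'XY')
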
1764 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1779 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1765 """Reads a patch from fp and tries to apply it.
1780 """Reads a patch from fp and tries to apply it.
1766
1781
1767 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1782 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1768 there was any fuzz.
1783 there was any fuzz.
1769
1784
1770 If 'eolmode' is 'strict', the patch content and patched file are
1785 If 'eolmode' is 'strict', the patch content and patched file are
1771 read in binary mode. Otherwise, line endings are ignored when
1786 read in binary mode. Otherwise, line endings are ignored when
1772 patching then normalized according to 'eolmode'.
1787 patching then normalized according to 'eolmode'.
1773 """
1788 """
1774 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1789 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1775 prefix=prefix, eolmode=eolmode)
1790 prefix=prefix, eolmode=eolmode)
1776
1791
1777 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1792 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1778 eolmode='strict'):
1793 eolmode='strict'):
1779
1794
1780 if prefix:
1795 if prefix:
1781 # clean up double slashes, lack of trailing slashes, etc
1796 # clean up double slashes, lack of trailing slashes, etc
1782 prefix = util.normpath(prefix) + '/'
1797 prefix = util.normpath(prefix) + '/'
1783 def pstrip(p):
1798 def pstrip(p):
1784 return pathtransform(p, strip - 1, prefix)[1]
1799 return pathtransform(p, strip - 1, prefix)[1]
1785
1800
1786 rejects = 0
1801 rejects = 0
1787 err = 0
1802 err = 0
1788 current_file = None
1803 current_file = None
1789
1804
1790 for state, values in iterhunks(fp):
1805 for state, values in iterhunks(fp):
1791 if state == 'hunk':
1806 if state == 'hunk':
1792 if not current_file:
1807 if not current_file:
1793 continue
1808 continue
1794 ret = current_file.apply(values)
1809 ret = current_file.apply(values)
1795 if ret > 0:
1810 if ret > 0:
1796 err = 1
1811 err = 1
1797 elif state == 'file':
1812 elif state == 'file':
1798 if current_file:
1813 if current_file:
1799 rejects += current_file.close()
1814 rejects += current_file.close()
1800 current_file = None
1815 current_file = None
1801 afile, bfile, first_hunk, gp = values
1816 afile, bfile, first_hunk, gp = values
1802 if gp:
1817 if gp:
1803 gp.path = pstrip(gp.path)
1818 gp.path = pstrip(gp.path)
1804 if gp.oldpath:
1819 if gp.oldpath:
1805 gp.oldpath = pstrip(gp.oldpath)
1820 gp.oldpath = pstrip(gp.oldpath)
1806 else:
1821 else:
1807 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1822 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1808 prefix)
1823 prefix)
1809 if gp.op == 'RENAME':
1824 if gp.op == 'RENAME':
1810 backend.unlink(gp.oldpath)
1825 backend.unlink(gp.oldpath)
1811 if not first_hunk:
1826 if not first_hunk:
1812 if gp.op == 'DELETE':
1827 if gp.op == 'DELETE':
1813 backend.unlink(gp.path)
1828 backend.unlink(gp.path)
1814 continue
1829 continue
1815 data, mode = None, None
1830 data, mode = None, None
1816 if gp.op in ('RENAME', 'COPY'):
1831 if gp.op in ('RENAME', 'COPY'):
1817 data, mode = store.getfile(gp.oldpath)[:2]
1832 data, mode = store.getfile(gp.oldpath)[:2]
1818 # FIXME: failing getfile has never been handled here
1833 # FIXME: failing getfile has never been handled here
1819 assert data is not None
1834 assert data is not None
1820 if gp.mode:
1835 if gp.mode:
1821 mode = gp.mode
1836 mode = gp.mode
1822 if gp.op == 'ADD':
1837 if gp.op == 'ADD':
1823 # Added files without content have no hunk and
1838 # Added files without content have no hunk and
1824 # must be created
1839 # must be created
1825 data = ''
1840 data = ''
1826 if data or mode:
1841 if data or mode:
1827 if (gp.op in ('ADD', 'RENAME', 'COPY')
1842 if (gp.op in ('ADD', 'RENAME', 'COPY')
1828 and backend.exists(gp.path)):
1843 and backend.exists(gp.path)):
1829 raise PatchError(_("cannot create %s: destination "
1844 raise PatchError(_("cannot create %s: destination "
1830 "already exists") % gp.path)
1845 "already exists") % gp.path)
1831 backend.setfile(gp.path, data, mode, gp.oldpath)
1846 backend.setfile(gp.path, data, mode, gp.oldpath)
1832 continue
1847 continue
1833 try:
1848 try:
1834 current_file = patcher(ui, gp, backend, store,
1849 current_file = patcher(ui, gp, backend, store,
1835 eolmode=eolmode)
1850 eolmode=eolmode)
1836 except PatchError, inst:
1851 except PatchError, inst:
1837 ui.warn(str(inst) + '\n')
1852 ui.warn(str(inst) + '\n')
1838 current_file = None
1853 current_file = None
1839 rejects += 1
1854 rejects += 1
1840 continue
1855 continue
1841 elif state == 'git':
1856 elif state == 'git':
1842 for gp in values:
1857 for gp in values:
1843 path = pstrip(gp.oldpath)
1858 path = pstrip(gp.oldpath)
1844 data, mode = backend.getfile(path)
1859 data, mode = backend.getfile(path)
1845 if data is None:
1860 if data is None:
1846 # The error ignored here will trigger a getfile()
1861 # The error ignored here will trigger a getfile()
1847 # error in a place more appropriate for error
1862 # error in a place more appropriate for error
1848 # handling, and will not interrupt the patching
1863 # handling, and will not interrupt the patching
1849 # process.
1864 # process.
1850 pass
1865 pass
1851 else:
1866 else:
1852 store.setfile(path, data, mode)
1867 store.setfile(path, data, mode)
1853 else:
1868 else:
1854 raise util.Abort(_('unsupported parser state: %s') % state)
1869 raise util.Abort(_('unsupported parser state: %s') % state)
1855
1870
1856 if current_file:
1871 if current_file:
1857 rejects += current_file.close()
1872 rejects += current_file.close()
1858
1873
1859 if rejects:
1874 if rejects:
1860 return -1
1875 return -1
1861 return err
1876 return err
1862
1877
1863 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1878 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1864 similarity):
1879 similarity):
1865 """use <patcher> to apply <patchname> to the working directory.
1880 """use <patcher> to apply <patchname> to the working directory.
1866 returns whether patch was applied with fuzz factor."""
1881 returns whether patch was applied with fuzz factor."""
1867
1882
1868 fuzz = False
1883 fuzz = False
1869 args = []
1884 args = []
1870 cwd = repo.root
1885 cwd = repo.root
1871 if cwd:
1886 if cwd:
1872 args.append('-d %s' % util.shellquote(cwd))
1887 args.append('-d %s' % util.shellquote(cwd))
1873 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1888 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1874 util.shellquote(patchname)))
1889 util.shellquote(patchname)))
1875 try:
1890 try:
1876 for line in fp:
1891 for line in fp:
1877 line = line.rstrip()
1892 line = line.rstrip()
1878 ui.note(line + '\n')
1893 ui.note(line + '\n')
1879 if line.startswith('patching file '):
1894 if line.startswith('patching file '):
1880 pf = util.parsepatchoutput(line)
1895 pf = util.parsepatchoutput(line)
1881 printed_file = False
1896 printed_file = False
1882 files.add(pf)
1897 files.add(pf)
1883 elif line.find('with fuzz') >= 0:
1898 elif line.find('with fuzz') >= 0:
1884 fuzz = True
1899 fuzz = True
1885 if not printed_file:
1900 if not printed_file:
1886 ui.warn(pf + '\n')
1901 ui.warn(pf + '\n')
1887 printed_file = True
1902 printed_file = True
1888 ui.warn(line + '\n')
1903 ui.warn(line + '\n')
1889 elif line.find('saving rejects to file') >= 0:
1904 elif line.find('saving rejects to file') >= 0:
1890 ui.warn(line + '\n')
1905 ui.warn(line + '\n')
1891 elif line.find('FAILED') >= 0:
1906 elif line.find('FAILED') >= 0:
1892 if not printed_file:
1907 if not printed_file:
1893 ui.warn(pf + '\n')
1908 ui.warn(pf + '\n')
1894 printed_file = True
1909 printed_file = True
1895 ui.warn(line + '\n')
1910 ui.warn(line + '\n')
1896 finally:
1911 finally:
1897 if files:
1912 if files:
1898 scmutil.marktouched(repo, files, similarity)
1913 scmutil.marktouched(repo, files, similarity)
1899 code = fp.close()
1914 code = fp.close()
1900 if code:
1915 if code:
1901 raise PatchError(_("patch command failed: %s") %
1916 raise PatchError(_("patch command failed: %s") %
1902 util.explainexit(code)[0])
1917 util.explainexit(code)[0])
1903 return fuzz
1918 return fuzz
1904
1919
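# Illustrative sketch (editor addition, not upstream code): the shell command
# _externalpatch() above builds, e.g. with patcher='patch', strip=1,
# repo.root='/repo' and patchname='fix.diff' (quoting comes from
# util.shellquote and is platform dependent):
_example_external_cmd = "patch -d '/repo' -p1 < 'fix.diff'"
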
1905 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
1920 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
1906 eolmode='strict'):
1921 eolmode='strict'):
1907 if files is None:
1922 if files is None:
1908 files = set()
1923 files = set()
1909 if eolmode is None:
1924 if eolmode is None:
1910 eolmode = ui.config('patch', 'eol', 'strict')
1925 eolmode = ui.config('patch', 'eol', 'strict')
1911 if eolmode.lower() not in eolmodes:
1926 if eolmode.lower() not in eolmodes:
1912 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1927 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1913 eolmode = eolmode.lower()
1928 eolmode = eolmode.lower()
1914
1929
1915 store = filestore()
1930 store = filestore()
1916 try:
1931 try:
1917 fp = open(patchobj, 'rb')
1932 fp = open(patchobj, 'rb')
1918 except TypeError:
1933 except TypeError:
1919 fp = patchobj
1934 fp = patchobj
1920 try:
1935 try:
1921 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
1936 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
1922 eolmode=eolmode)
1937 eolmode=eolmode)
1923 finally:
1938 finally:
1924 if fp != patchobj:
1939 if fp != patchobj:
1925 fp.close()
1940 fp.close()
1926 files.update(backend.close())
1941 files.update(backend.close())
1927 store.close()
1942 store.close()
1928 if ret < 0:
1943 if ret < 0:
1929 raise PatchError(_('patch failed to apply'))
1944 raise PatchError(_('patch failed to apply'))
1930 return ret > 0
1945 return ret > 0
1931
1946
1932 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
1947 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
1933 eolmode='strict', similarity=0):
1948 eolmode='strict', similarity=0):
1934 """use builtin patch to apply <patchobj> to the working directory.
1949 """use builtin patch to apply <patchobj> to the working directory.
1935 returns whether patch was applied with fuzz factor."""
1950 returns whether patch was applied with fuzz factor."""
1936 backend = workingbackend(ui, repo, similarity)
1951 backend = workingbackend(ui, repo, similarity)
1937 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1952 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1938
1953
1939 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
1954 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
1940 eolmode='strict'):
1955 eolmode='strict'):
1941 backend = repobackend(ui, repo, ctx, store)
1956 backend = repobackend(ui, repo, ctx, store)
1942 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1957 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1943
1958
1944 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
1959 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
1945 similarity=0):
1960 similarity=0):
1946 """Apply <patchname> to the working directory.
1961 """Apply <patchname> to the working directory.
1947
1962
1948 'eolmode' specifies how end of lines should be handled. It can be:
1963 'eolmode' specifies how end of lines should be handled. It can be:
1949 - 'strict': inputs are read in binary mode, EOLs are preserved
1964 - 'strict': inputs are read in binary mode, EOLs are preserved
1950 - 'crlf': EOLs are ignored when patching and reset to CRLF
1965 - 'crlf': EOLs are ignored when patching and reset to CRLF
1951 - 'lf': EOLs are ignored when patching and reset to LF
1966 - 'lf': EOLs are ignored when patching and reset to LF
1952 - None: get it from user settings, default to 'strict'
1967 - None: get it from user settings, default to 'strict'
1953 'eolmode' is ignored when using an external patcher program.
1968 'eolmode' is ignored when using an external patcher program.
1954
1969
1955 Returns whether patch was applied with fuzz factor.
1970 Returns whether patch was applied with fuzz factor.
1956 """
1971 """
1957 patcher = ui.config('ui', 'patch')
1972 patcher = ui.config('ui', 'patch')
1958 if files is None:
1973 if files is None:
1959 files = set()
1974 files = set()
1960 if patcher:
1975 if patcher:
1961 return _externalpatch(ui, repo, patcher, patchname, strip,
1976 return _externalpatch(ui, repo, patcher, patchname, strip,
1962 files, similarity)
1977 files, similarity)
1963 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
1978 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
1964 similarity)
1979 similarity)
1965
1980
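# --- Editor's sketch (hypothetical, not part of patch.py) ---
# A minimal example of driving patch() from Python, assuming the Mercurial
# 3.x / Python 2 API (mercurial.ui.ui(), mercurial.hg.repository()) and a
# placeholder patch file name supplied by the caller.
def _example_apply_patch(patchfile):
    from mercurial import ui as uimod, hg
    u = uimod.ui()
    repo = hg.repository(u, '.')
    # strip=1 matches the "-p1" layout produced by hg export / git diff;
    # the return value is True when hunks only applied with fuzz.
    return patch(u, repo, patchfile, strip=1, eolmode='strict')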
1966 def changedfiles(ui, repo, patchpath, strip=1):
1981 def changedfiles(ui, repo, patchpath, strip=1):
1967 backend = fsbackend(ui, repo.root)
1982 backend = fsbackend(ui, repo.root)
1968 fp = open(patchpath, 'rb')
1983 fp = open(patchpath, 'rb')
1969 try:
1984 try:
1970 changed = set()
1985 changed = set()
1971 for state, values in iterhunks(fp):
1986 for state, values in iterhunks(fp):
1972 if state == 'file':
1987 if state == 'file':
1973 afile, bfile, first_hunk, gp = values
1988 afile, bfile, first_hunk, gp = values
1974 if gp:
1989 if gp:
1975 gp.path = pathtransform(gp.path, strip - 1, '')[1]
1990 gp.path = pathtransform(gp.path, strip - 1, '')[1]
1976 if gp.oldpath:
1991 if gp.oldpath:
1977 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
1992 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
1978 else:
1993 else:
1979 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1994 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1980 '')
1995 '')
1981 changed.add(gp.path)
1996 changed.add(gp.path)
1982 if gp.op == 'RENAME':
1997 if gp.op == 'RENAME':
1983 changed.add(gp.oldpath)
1998 changed.add(gp.oldpath)
1984 elif state not in ('hunk', 'git'):
1999 elif state not in ('hunk', 'git'):
1985 raise util.Abort(_('unsupported parser state: %s') % state)
2000 raise util.Abort(_('unsupported parser state: %s') % state)
1986 return changed
2001 return changed
1987 finally:
2002 finally:
1988 fp.close()
2003 fp.close()
1989
2004
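# --- Editor's sketch (hypothetical, not part of patch.py) ---
# changedfiles() parses a patch without applying it, which makes it handy
# for previewing the paths a patch would touch.  The helper name and the
# sorted() presentation are illustrative only.
def _example_preview_patch(repo, patchpath):
    # strip=1 mirrors the default used by patch() above
    return sorted(changedfiles(repo.ui, repo, patchpath, strip=1))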
1990 class GitDiffRequired(Exception):
2005 class GitDiffRequired(Exception):
1991 pass
2006 pass
1992
2007
1993 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2008 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
1994 '''return diffopts with all features supported and parsed'''
2009 '''return diffopts with all features supported and parsed'''
1995 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2010 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
1996 git=True, whitespace=True, formatchanging=True)
2011 git=True, whitespace=True, formatchanging=True)
1997
2012
1998 diffopts = diffallopts
2013 diffopts = diffallopts
1999
2014
2000 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2015 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2001 whitespace=False, formatchanging=False):
2016 whitespace=False, formatchanging=False):
2002 '''return diffopts with only opted-in features parsed
2017 '''return diffopts with only opted-in features parsed
2003
2018
2004 Features:
2019 Features:
2005 - git: git-style diffs
2020 - git: git-style diffs
2006 - whitespace: whitespace options like ignoreblanklines and ignorews
2021 - whitespace: whitespace options like ignoreblanklines and ignorews
2007 - formatchanging: options that will likely break or cause correctness issues
2022 - formatchanging: options that will likely break or cause correctness issues
2008 with most diff parsers
2023 with most diff parsers
2009 '''
2024 '''
2010 def get(key, name=None, getter=ui.configbool, forceplain=None):
2025 def get(key, name=None, getter=ui.configbool, forceplain=None):
2011 if opts:
2026 if opts:
2012 v = opts.get(key)
2027 v = opts.get(key)
2013 if v:
2028 if v:
2014 return v
2029 return v
2015 if forceplain is not None and ui.plain():
2030 if forceplain is not None and ui.plain():
2016 return forceplain
2031 return forceplain
2017 return getter(section, name or key, None, untrusted=untrusted)
2032 return getter(section, name or key, None, untrusted=untrusted)
2018
2033
2019 # core options, expected to be understood by every diff parser
2034 # core options, expected to be understood by every diff parser
2020 buildopts = {
2035 buildopts = {
2021 'nodates': get('nodates'),
2036 'nodates': get('nodates'),
2022 'showfunc': get('show_function', 'showfunc'),
2037 'showfunc': get('show_function', 'showfunc'),
2023 'context': get('unified', getter=ui.config),
2038 'context': get('unified', getter=ui.config),
2024 }
2039 }
2025
2040
2026 if git:
2041 if git:
2027 buildopts['git'] = get('git')
2042 buildopts['git'] = get('git')
2028 if whitespace:
2043 if whitespace:
2029 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2044 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2030 buildopts['ignorewsamount'] = get('ignore_space_change',
2045 buildopts['ignorewsamount'] = get('ignore_space_change',
2031 'ignorewsamount')
2046 'ignorewsamount')
2032 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2047 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2033 'ignoreblanklines')
2048 'ignoreblanklines')
2034 if formatchanging:
2049 if formatchanging:
2035 buildopts['text'] = opts and opts.get('text')
2050 buildopts['text'] = opts and opts.get('text')
2036 buildopts['nobinary'] = get('nobinary')
2051 buildopts['nobinary'] = get('nobinary')
2037 buildopts['noprefix'] = get('noprefix', forceplain=False)
2052 buildopts['noprefix'] = get('noprefix', forceplain=False)
2038
2053
2039 return mdiff.diffopts(**buildopts)
2054 return mdiff.diffopts(**buildopts)
2040
2055
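# --- Editor's sketch (hypothetical, not part of patch.py) ---
# difffeatureopts() only parses the option groups a caller opts into.  The
# sketch builds diffopts that honour git-style and whitespace settings while
# leaving the format-changing options (text, nobinary, noprefix) untouched.
def _example_safe_diffopts(ui, opts=None):
    return difffeatureopts(ui, opts=opts, git=True, whitespace=True)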
2041 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2056 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2042 losedatafn=None, prefix=''):
2057 losedatafn=None, prefix=''):
2043 '''yields diff of changes to files between two nodes, or node and
2058 '''yields diff of changes to files between two nodes, or node and
2044 working directory.
2059 working directory.
2045
2060
2046 if node1 is None, use first dirstate parent instead.
2061 if node1 is None, use first dirstate parent instead.
2047 if node2 is None, compare node1 with working directory.
2062 if node2 is None, compare node1 with working directory.
2048
2063
2049 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2064 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2050 every time some change cannot be represented with the current
2065 every time some change cannot be represented with the current
2051 patch format. Return False to upgrade to git patch format, True to
2066 patch format. Return False to upgrade to git patch format, True to
2052 accept the loss or raise an exception to abort the diff. It is
2067 accept the loss or raise an exception to abort the diff. It is
2053 called with the name of current file being diffed as 'fn'. If set
2068 called with the name of current file being diffed as 'fn'. If set
2054 to None, patches will always be upgraded to git format when
2069 to None, patches will always be upgraded to git format when
2055 necessary.
2070 necessary.
2056
2071
2057 prefix is a filename prefix that is prepended to all filenames on
2072 prefix is a filename prefix that is prepended to all filenames on
2058 display (used for subrepos).
2073 display (used for subrepos).
2059 '''
2074 '''
2060
2075
2061 if opts is None:
2076 if opts is None:
2062 opts = mdiff.defaultopts
2077 opts = mdiff.defaultopts
2063
2078
2064 if not node1 and not node2:
2079 if not node1 and not node2:
2065 node1 = repo.dirstate.p1()
2080 node1 = repo.dirstate.p1()
2066
2081
2067 def lrugetfilectx():
2082 def lrugetfilectx():
2068 cache = {}
2083 cache = {}
2069 order = util.deque()
2084 order = util.deque()
2070 def getfilectx(f, ctx):
2085 def getfilectx(f, ctx):
2071 fctx = ctx.filectx(f, filelog=cache.get(f))
2086 fctx = ctx.filectx(f, filelog=cache.get(f))
2072 if f not in cache:
2087 if f not in cache:
2073 if len(cache) > 20:
2088 if len(cache) > 20:
2074 del cache[order.popleft()]
2089 del cache[order.popleft()]
2075 cache[f] = fctx.filelog()
2090 cache[f] = fctx.filelog()
2076 else:
2091 else:
2077 order.remove(f)
2092 order.remove(f)
2078 order.append(f)
2093 order.append(f)
2079 return fctx
2094 return fctx
2080 return getfilectx
2095 return getfilectx
2081 getfilectx = lrugetfilectx()
2096 getfilectx = lrugetfilectx()
2082
2097
2083 ctx1 = repo[node1]
2098 ctx1 = repo[node1]
2084 ctx2 = repo[node2]
2099 ctx2 = repo[node2]
2085
2100
2086 if not changes:
2101 if not changes:
2087 changes = repo.status(ctx1, ctx2, match=match)
2102 changes = repo.status(ctx1, ctx2, match=match)
2088 modified, added, removed = changes[:3]
2103 modified, added, removed = changes[:3]
2089
2104
2090 if not modified and not added and not removed:
2105 if not modified and not added and not removed:
2091 return []
2106 return []
2092
2107
2093 if repo.ui.debugflag:
2108 if repo.ui.debugflag:
2094 hexfunc = hex
2109 hexfunc = hex
2095 else:
2110 else:
2096 hexfunc = short
2111 hexfunc = short
2097 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2112 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2098
2113
2099 copy = {}
2114 copy = {}
2100 if opts.git or opts.upgrade:
2115 if opts.git or opts.upgrade:
2101 copy = copies.pathcopies(ctx1, ctx2)
2116 copy = copies.pathcopies(ctx1, ctx2)
2102
2117
2103 def difffn(opts, losedata):
2118 def difffn(opts, losedata):
2104 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2119 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2105 copy, getfilectx, opts, losedata, prefix)
2120 copy, getfilectx, opts, losedata, prefix)
2106 if opts.upgrade and not opts.git:
2121 if opts.upgrade and not opts.git:
2107 try:
2122 try:
2108 def losedata(fn):
2123 def losedata(fn):
2109 if not losedatafn or not losedatafn(fn=fn):
2124 if not losedatafn or not losedatafn(fn=fn):
2110 raise GitDiffRequired
2125 raise GitDiffRequired
2111 # Buffer the whole output until we are sure it can be generated
2126 # Buffer the whole output until we are sure it can be generated
2112 return list(difffn(opts.copy(git=False), losedata))
2127 return list(difffn(opts.copy(git=False), losedata))
2113 except GitDiffRequired:
2128 except GitDiffRequired:
2114 return difffn(opts.copy(git=True), None)
2129 return difffn(opts.copy(git=True), None)
2115 else:
2130 else:
2116 return difffn(opts, None)
2131 return difffn(opts, None)
2117
2132
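# --- Editor's sketch (hypothetical, not part of patch.py) ---
# diff() is a generator of text chunks.  With node1 and node2 both left as
# None it compares the first dirstate parent against the working directory,
# as described in the docstring above.
def _example_working_copy_diff(repo, match=None):
    opts = diffallopts(repo.ui)              # parse every supported option
    return ''.join(diff(repo, match=match, opts=opts))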
2118 def difflabel(func, *args, **kw):
2133 def difflabel(func, *args, **kw):
2119 '''yields 2-tuples of (output, label) based on the output of func()'''
2134 '''yields 2-tuples of (output, label) based on the output of func()'''
2120 headprefixes = [('diff', 'diff.diffline'),
2135 headprefixes = [('diff', 'diff.diffline'),
2121 ('copy', 'diff.extended'),
2136 ('copy', 'diff.extended'),
2122 ('rename', 'diff.extended'),
2137 ('rename', 'diff.extended'),
2123 ('old', 'diff.extended'),
2138 ('old', 'diff.extended'),
2124 ('new', 'diff.extended'),
2139 ('new', 'diff.extended'),
2125 ('deleted', 'diff.extended'),
2140 ('deleted', 'diff.extended'),
2126 ('---', 'diff.file_a'),
2141 ('---', 'diff.file_a'),
2127 ('+++', 'diff.file_b')]
2142 ('+++', 'diff.file_b')]
2128 textprefixes = [('@', 'diff.hunk'),
2143 textprefixes = [('@', 'diff.hunk'),
2129 ('-', 'diff.deleted'),
2144 ('-', 'diff.deleted'),
2130 ('+', 'diff.inserted')]
2145 ('+', 'diff.inserted')]
2131 head = False
2146 head = False
2132 for chunk in func(*args, **kw):
2147 for chunk in func(*args, **kw):
2133 lines = chunk.split('\n')
2148 lines = chunk.split('\n')
2134 for i, line in enumerate(lines):
2149 for i, line in enumerate(lines):
2135 if i != 0:
2150 if i != 0:
2136 yield ('\n', '')
2151 yield ('\n', '')
2137 if head:
2152 if head:
2138 if line.startswith('@'):
2153 if line.startswith('@'):
2139 head = False
2154 head = False
2140 else:
2155 else:
2141 if line and line[0] not in ' +-@\\':
2156 if line and line[0] not in ' +-@\\':
2142 head = True
2157 head = True
2143 stripline = line
2158 stripline = line
2144 diffline = False
2159 diffline = False
2145 if not head and line and line[0] in '+-':
2160 if not head and line and line[0] in '+-':
2146 # highlight tabs and trailing whitespace, but only in
2161 # highlight tabs and trailing whitespace, but only in
2147 # changed lines
2162 # changed lines
2148 stripline = line.rstrip()
2163 stripline = line.rstrip()
2149 diffline = True
2164 diffline = True
2150
2165
2151 prefixes = textprefixes
2166 prefixes = textprefixes
2152 if head:
2167 if head:
2153 prefixes = headprefixes
2168 prefixes = headprefixes
2154 for prefix, label in prefixes:
2169 for prefix, label in prefixes:
2155 if stripline.startswith(prefix):
2170 if stripline.startswith(prefix):
2156 if diffline:
2171 if diffline:
2157 for token in tabsplitter.findall(stripline):
2172 for token in tabsplitter.findall(stripline):
2158 if '\t' == token[0]:
2173 if '\t' == token[0]:
2159 yield (token, 'diff.tab')
2174 yield (token, 'diff.tab')
2160 else:
2175 else:
2161 yield (token, label)
2176 yield (token, label)
2162 else:
2177 else:
2163 yield (stripline, label)
2178 yield (stripline, label)
2164 break
2179 break
2165 else:
2180 else:
2166 yield (line, '')
2181 yield (line, '')
2167 if line != stripline:
2182 if line != stripline:
2168 yield (line[len(stripline):], 'diff.trailingwhitespace')
2183 yield (line[len(stripline):], 'diff.trailingwhitespace')
2169
2184
2170 def diffui(*args, **kw):
2185 def diffui(*args, **kw):
2171 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2186 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2172 return difflabel(diff, *args, **kw)
2187 return difflabel(diff, *args, **kw)
2173
2188
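# --- Editor's sketch (hypothetical, not part of patch.py) ---
# diffui() yields (text, label) pairs intended for ui.write(), so extensions
# such as color can style hunk headers, inserted and deleted lines.
def _example_write_labelled_diff(ui, repo):
    for chunk, label in diffui(repo, opts=diffallopts(ui)):
        ui.write(chunk, label=label)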
2174 def _filepairs(ctx1, modified, added, removed, copy, opts):
2189 def _filepairs(ctx1, modified, added, removed, copy, opts):
2175 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2190 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2176 before and f2 is the name after. For added files, f1 will be None,
2191 before and f2 is the name after. For added files, f1 will be None,
2177 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2192 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2178 or 'rename' (the latter two only if opts.git is set).'''
2193 or 'rename' (the latter two only if opts.git is set).'''
2179 gone = set()
2194 gone = set()
2180
2195
2181 copyto = dict([(v, k) for k, v in copy.items()])
2196 copyto = dict([(v, k) for k, v in copy.items()])
2182
2197
2183 addedset, removedset = set(added), set(removed)
2198 addedset, removedset = set(added), set(removed)
2184 # Fix up added, since merged-in additions appear as
2199 # Fix up added, since merged-in additions appear as
2185 # modifications during merges
2200 # modifications during merges
2186 for f in modified:
2201 for f in modified:
2187 if f not in ctx1:
2202 if f not in ctx1:
2188 addedset.add(f)
2203 addedset.add(f)
2189
2204
2190 for f in sorted(modified + added + removed):
2205 for f in sorted(modified + added + removed):
2191 copyop = None
2206 copyop = None
2192 f1, f2 = f, f
2207 f1, f2 = f, f
2193 if f in addedset:
2208 if f in addedset:
2194 f1 = None
2209 f1 = None
2195 if f in copy:
2210 if f in copy:
2196 if opts.git:
2211 if opts.git:
2197 f1 = copy[f]
2212 f1 = copy[f]
2198 if f1 in removedset and f1 not in gone:
2213 if f1 in removedset and f1 not in gone:
2199 copyop = 'rename'
2214 copyop = 'rename'
2200 gone.add(f1)
2215 gone.add(f1)
2201 else:
2216 else:
2202 copyop = 'copy'
2217 copyop = 'copy'
2203 elif f in removedset:
2218 elif f in removedset:
2204 f2 = None
2219 f2 = None
2205 if opts.git:
2220 if opts.git:
2206 # have we already reported a copy above?
2221 # have we already reported a copy above?
2207 if (f in copyto and copyto[f] in addedset
2222 if (f in copyto and copyto[f] in addedset
2208 and copy[copyto[f]] == f):
2223 and copy[copyto[f]] == f):
2209 continue
2224 continue
2210 yield f1, f2, copyop
2225 yield f1, f2, copyop
2211
2226
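# --- Editor's sketch (hypothetical, not part of patch.py) ---
# _filepairs() only needs membership tests on ctx1 and a dest->source copy
# map, so plain containers are enough to show how an add/remove pair with
# copy information collapses into a single rename when git diffs are on.
def _example_filepairs_rename():
    opts = mdiff.diffopts(git=True)
    pairs = _filepairs(set(['old.txt']), [], ['new.txt'], ['old.txt'],
                       {'new.txt': 'old.txt'}, opts)
    return list(pairs)    # [('old.txt', 'new.txt', 'rename')]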
2212 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2227 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2213 copy, getfilectx, opts, losedatafn, prefix):
2228 copy, getfilectx, opts, losedatafn, prefix):
2214
2229
2215 def gitindex(text):
2230 def gitindex(text):
2216 if not text:
2231 if not text:
2217 text = ""
2232 text = ""
2218 l = len(text)
2233 l = len(text)
2219 s = util.sha1('blob %d\0' % l)
2234 s = util.sha1('blob %d\0' % l)
2220 s.update(text)
2235 s.update(text)
2221 return s.hexdigest()
2236 return s.hexdigest()
2222
2237
2223 if opts.noprefix:
2238 if opts.noprefix:
2224 aprefix = bprefix = ''
2239 aprefix = bprefix = ''
2225 else:
2240 else:
2226 aprefix = 'a/'
2241 aprefix = 'a/'
2227 bprefix = 'b/'
2242 bprefix = 'b/'
2228
2243
2229 def diffline(f, revs):
2244 def diffline(f, revs):
2230 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2245 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2231 return 'diff %s %s' % (revinfo, f)
2246 return 'diff %s %s' % (revinfo, f)
2232
2247
2233 date1 = util.datestr(ctx1.date())
2248 date1 = util.datestr(ctx1.date())
2234 date2 = util.datestr(ctx2.date())
2249 date2 = util.datestr(ctx2.date())
2235
2250
2236 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2251 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2237
2252
2238 for f1, f2, copyop in _filepairs(
2253 for f1, f2, copyop in _filepairs(
2239 ctx1, modified, added, removed, copy, opts):
2254 ctx1, modified, added, removed, copy, opts):
2240 content1 = None
2255 content1 = None
2241 content2 = None
2256 content2 = None
2242 flag1 = None
2257 flag1 = None
2243 flag2 = None
2258 flag2 = None
2244 if f1:
2259 if f1:
2245 content1 = getfilectx(f1, ctx1).data()
2260 content1 = getfilectx(f1, ctx1).data()
2246 if opts.git or losedatafn:
2261 if opts.git or losedatafn:
2247 flag1 = ctx1.flags(f1)
2262 flag1 = ctx1.flags(f1)
2248 if f2:
2263 if f2:
2249 content2 = getfilectx(f2, ctx2).data()
2264 content2 = getfilectx(f2, ctx2).data()
2250 if opts.git or losedatafn:
2265 if opts.git or losedatafn:
2251 flag2 = ctx2.flags(f2)
2266 flag2 = ctx2.flags(f2)
2252 binary = False
2267 binary = False
2253 if opts.git or losedatafn:
2268 if opts.git or losedatafn:
2254 binary = util.binary(content1) or util.binary(content2)
2269 binary = util.binary(content1) or util.binary(content2)
2255
2270
2256 if losedatafn and not opts.git:
2271 if losedatafn and not opts.git:
2257 if (binary or
2272 if (binary or
2258 # copy/rename
2273 # copy/rename
2259 f2 in copy or
2274 f2 in copy or
2260 # empty file creation
2275 # empty file creation
2261 (not f1 and not content2) or
2276 (not f1 and not content2) or
2262 # empty file deletion
2277 # empty file deletion
2263 (not content1 and not f2) or
2278 (not content1 and not f2) or
2264 # create with flags
2279 # create with flags
2265 (not f1 and flag2) or
2280 (not f1 and flag2) or
2266 # change flags
2281 # change flags
2267 (f1 and f2 and flag1 != flag2)):
2282 (f1 and f2 and flag1 != flag2)):
2268 losedatafn(f2 or f1)
2283 losedatafn(f2 or f1)
2269
2284
2270 path1 = posixpath.join(prefix, f1 or f2)
2285 path1 = posixpath.join(prefix, f1 or f2)
2271 path2 = posixpath.join(prefix, f2 or f1)
2286 path2 = posixpath.join(prefix, f2 or f1)
2272 header = []
2287 header = []
2273 if opts.git:
2288 if opts.git:
2274 header.append('diff --git %s%s %s%s' %
2289 header.append('diff --git %s%s %s%s' %
2275 (aprefix, path1, bprefix, path2))
2290 (aprefix, path1, bprefix, path2))
2276 if not f1: # added
2291 if not f1: # added
2277 header.append('new file mode %s' % gitmode[flag2])
2292 header.append('new file mode %s' % gitmode[flag2])
2278 elif not f2: # removed
2293 elif not f2: # removed
2279 header.append('deleted file mode %s' % gitmode[flag1])
2294 header.append('deleted file mode %s' % gitmode[flag1])
2280 else: # modified/copied/renamed
2295 else: # modified/copied/renamed
2281 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2296 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2282 if mode1 != mode2:
2297 if mode1 != mode2:
2283 header.append('old mode %s' % mode1)
2298 header.append('old mode %s' % mode1)
2284 header.append('new mode %s' % mode2)
2299 header.append('new mode %s' % mode2)
2285 if copyop is not None:
2300 if copyop is not None:
2286 header.append('%s from %s' % (copyop, path1))
2301 header.append('%s from %s' % (copyop, path1))
2287 header.append('%s to %s' % (copyop, path2))
2302 header.append('%s to %s' % (copyop, path2))
2288 elif revs and not repo.ui.quiet:
2303 elif revs and not repo.ui.quiet:
2289 header.append(diffline(path1, revs))
2304 header.append(diffline(path1, revs))
2290
2305
2291 if binary and opts.git and not opts.nobinary:
2306 if binary and opts.git and not opts.nobinary:
2292 text = mdiff.b85diff(content1, content2)
2307 text = mdiff.b85diff(content1, content2)
2293 if text:
2308 if text:
2294 header.append('index %s..%s' %
2309 header.append('index %s..%s' %
2295 (gitindex(content1), gitindex(content2)))
2310 (gitindex(content1), gitindex(content2)))
2296 else:
2311 else:
2297 text = mdiff.unidiff(content1, date1,
2312 text = mdiff.unidiff(content1, date1,
2298 content2, date2,
2313 content2, date2,
2299 path1, path2, opts=opts)
2314 path1, path2, opts=opts)
2300 if header and (text or len(header) > 1):
2315 if header and (text or len(header) > 1):
2301 yield '\n'.join(header) + '\n'
2316 yield '\n'.join(header) + '\n'
2302 if text:
2317 if text:
2303 yield text
2318 yield text
2304
2319
2305 def diffstatsum(stats):
2320 def diffstatsum(stats):
2306 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2321 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2307 for f, a, r, b in stats:
2322 for f, a, r, b in stats:
2308 maxfile = max(maxfile, encoding.colwidth(f))
2323 maxfile = max(maxfile, encoding.colwidth(f))
2309 maxtotal = max(maxtotal, a + r)
2324 maxtotal = max(maxtotal, a + r)
2310 addtotal += a
2325 addtotal += a
2311 removetotal += r
2326 removetotal += r
2312 binary = binary or b
2327 binary = binary or b
2313
2328
2314 return maxfile, maxtotal, addtotal, removetotal, binary
2329 return maxfile, maxtotal, addtotal, removetotal, binary
2315
2330
2316 def diffstatdata(lines):
2331 def diffstatdata(lines):
2317 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2332 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2318
2333
2319 results = []
2334 results = []
2320 filename, adds, removes, isbinary = None, 0, 0, False
2335 filename, adds, removes, isbinary = None, 0, 0, False
2321
2336
2322 def addresult():
2337 def addresult():
2323 if filename:
2338 if filename:
2324 results.append((filename, adds, removes, isbinary))
2339 results.append((filename, adds, removes, isbinary))
2325
2340
2326 for line in lines:
2341 for line in lines:
2327 if line.startswith('diff'):
2342 if line.startswith('diff'):
2328 addresult()
2343 addresult()
2329 # set numbers to 0 anyway when starting new file
2344 # set numbers to 0 anyway when starting new file
2330 adds, removes, isbinary = 0, 0, False
2345 adds, removes, isbinary = 0, 0, False
2331 if line.startswith('diff --git a/'):
2346 if line.startswith('diff --git a/'):
2332 filename = gitre.search(line).group(2)
2347 filename = gitre.search(line).group(2)
2333 elif line.startswith('diff -r'):
2348 elif line.startswith('diff -r'):
2334 # format: "diff -r ... -r ... filename"
2349 # format: "diff -r ... -r ... filename"
2335 filename = diffre.search(line).group(1)
2350 filename = diffre.search(line).group(1)
2336 elif line.startswith('+') and not line.startswith('+++ '):
2351 elif line.startswith('+') and not line.startswith('+++ '):
2337 adds += 1
2352 adds += 1
2338 elif line.startswith('-') and not line.startswith('--- '):
2353 elif line.startswith('-') and not line.startswith('--- '):
2339 removes += 1
2354 removes += 1
2340 elif (line.startswith('GIT binary patch') or
2355 elif (line.startswith('GIT binary patch') or
2341 line.startswith('Binary file')):
2356 line.startswith('Binary file')):
2342 isbinary = True
2357 isbinary = True
2343 addresult()
2358 addresult()
2344 return results
2359 return results
2345
2360
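# --- Editor's sketch (hypothetical, not part of patch.py) ---
# diffstatdata() turns an iterable of diff lines into one
# (filename, adds, removes, isbinary) tuple per file, and diffstatsum()
# folds those tuples into the totals used for the summary line.
def _example_diffstat_data():
    lines = ['diff --git a/foo.txt b/foo.txt',
             '--- a/foo.txt',
             '+++ b/foo.txt',
             '@@ -1,2 +1,2 @@',
             '-old line',
             '+new line',
             ' context line']
    stats = diffstatdata(lines)           # [('foo.txt', 1, 1, False)]
    return diffstatsum(stats)             # (7, 2, 1, 1, False)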
2346 def diffstat(lines, width=80, git=False):
2361 def diffstat(lines, width=80, git=False):
2347 output = []
2362 output = []
2348 stats = diffstatdata(lines)
2363 stats = diffstatdata(lines)
2349 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2364 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2350
2365
2351 countwidth = len(str(maxtotal))
2366 countwidth = len(str(maxtotal))
2352 if hasbinary and countwidth < 3:
2367 if hasbinary and countwidth < 3:
2353 countwidth = 3
2368 countwidth = 3
2354 graphwidth = width - countwidth - maxname - 6
2369 graphwidth = width - countwidth - maxname - 6
2355 if graphwidth < 10:
2370 if graphwidth < 10:
2356 graphwidth = 10
2371 graphwidth = 10
2357
2372
2358 def scale(i):
2373 def scale(i):
2359 if maxtotal <= graphwidth:
2374 if maxtotal <= graphwidth:
2360 return i
2375 return i
2361 # If diffstat runs out of room it doesn't print anything,
2376 # If diffstat runs out of room it doesn't print anything,
2362 # which isn't very useful, so always print at least one + or -
2377 # which isn't very useful, so always print at least one + or -
2363 # if there were at least some changes.
2378 # if there were at least some changes.
2364 return max(i * graphwidth // maxtotal, int(bool(i)))
2379 return max(i * graphwidth // maxtotal, int(bool(i)))
2365
2380
2366 for filename, adds, removes, isbinary in stats:
2381 for filename, adds, removes, isbinary in stats:
2367 if isbinary:
2382 if isbinary:
2368 count = 'Bin'
2383 count = 'Bin'
2369 else:
2384 else:
2370 count = adds + removes
2385 count = adds + removes
2371 pluses = '+' * scale(adds)
2386 pluses = '+' * scale(adds)
2372 minuses = '-' * scale(removes)
2387 minuses = '-' * scale(removes)
2373 output.append(' %s%s | %*s %s%s\n' %
2388 output.append(' %s%s | %*s %s%s\n' %
2374 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2389 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2375 countwidth, count, pluses, minuses))
2390 countwidth, count, pluses, minuses))
2376
2391
2377 if stats:
2392 if stats:
2378 output.append(_(' %d files changed, %d insertions(+), '
2393 output.append(_(' %d files changed, %d insertions(+), '
2379 '%d deletions(-)\n')
2394 '%d deletions(-)\n')
2380 % (len(stats), totaladds, totalremoves))
2395 % (len(stats), totaladds, totalremoves))
2381
2396
2382 return ''.join(output)
2397 return ''.join(output)
2383
2398
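# --- Editor's sketch (hypothetical, not part of patch.py) ---
# diffstat() renders the per-file histogram from raw diff text; feeding it
# the chunks produced by diff() keeps everything in-process.
def _example_diffstat_report(repo):
    text = ''.join(diff(repo, opts=diffallopts(repo.ui)))
    return diffstat(text.splitlines(True), width=78)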
2384 def diffstatui(*args, **kw):
2399 def diffstatui(*args, **kw):
2385 '''like diffstat(), but yields 2-tuples of (output, label) for
2400 '''like diffstat(), but yields 2-tuples of (output, label) for
2386 ui.write()
2401 ui.write()
2387 '''
2402 '''
2388
2403
2389 for line in diffstat(*args, **kw).splitlines():
2404 for line in diffstat(*args, **kw).splitlines():
2390 if line and line[-1] in '+-':
2405 if line and line[-1] in '+-':
2391 name, graph = line.rsplit(' ', 1)
2406 name, graph = line.rsplit(' ', 1)
2392 yield (name + ' ', '')
2407 yield (name + ' ', '')
2393 m = re.search(r'\++', graph)
2408 m = re.search(r'\++', graph)
2394 if m:
2409 if m:
2395 yield (m.group(0), 'diffstat.inserted')
2410 yield (m.group(0), 'diffstat.inserted')
2396 m = re.search(r'-+', graph)
2411 m = re.search(r'-+', graph)
2397 if m:
2412 if m:
2398 yield (m.group(0), 'diffstat.deleted')
2413 yield (m.group(0), 'diffstat.deleted')
2399 else:
2414 else:
2400 yield (line, '')
2415 yield (line, '')
2401 yield ('\n', '')
2416 yield ('\n', '')