patch: use scmutil.marktouched instead of scmutil.addremove...
Siddharth Agarwal
r19155:0b3689a0 default
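For quick orientation, the gist of this change in workingbackend.close() is sketched below. This is a minimal illustration, not code taken from the changeset itself: the helper names _finalize_old/_finalize_new and the bare repo/changed/similarity parameters are stand-ins for the backend's self.repo, its set of touched files, and self.similarity as they appear in the diff that follows.

from mercurial import scmutil, util

def _finalize_old(repo, addremoved, similarity):
    # Before this change: scmutil.addremove() was called with paths made
    # relative to the current working directory, so the backend first
    # converted its repo-root-relative names with util.pathto().
    cwd = repo.getcwd()
    if cwd:
        addremoved = [util.pathto(repo.root, cwd, f) for f in addremoved]
    scmutil.addremove(repo, addremoved, similarity=similarity)

def _finalize_new(repo, changed, similarity):
    # After this change: scmutil.marktouched() is handed the touched files
    # directly, so the cwd bookkeeping and path conversion go away.
    scmutil.marktouched(repo, changed, similarity)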
@@ -1,1888 +1,1879 @@
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email.Parser, os, errno, re, posixpath
9 import cStringIO, email.Parser, os, errno, re, posixpath
10 import tempfile, zlib, shutil
10 import tempfile, zlib, shutil
11
11
12 from i18n import _
12 from i18n import _
13 from node import hex, nullid, short
13 from node import hex, nullid, short
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
14 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
15 import context
15 import context
16
16
17 gitre = re.compile('diff --git a/(.*) b/(.*)')
17 gitre = re.compile('diff --git a/(.*) b/(.*)')
18
18
19 class PatchError(Exception):
19 class PatchError(Exception):
20 pass
20 pass
21
21
22
22
23 # public functions
23 # public functions
24
24
25 def split(stream):
25 def split(stream):
26 '''return an iterator of individual patches from a stream'''
26 '''return an iterator of individual patches from a stream'''
27 def isheader(line, inheader):
27 def isheader(line, inheader):
28 if inheader and line[0] in (' ', '\t'):
28 if inheader and line[0] in (' ', '\t'):
29 # continuation
29 # continuation
30 return True
30 return True
31 if line[0] in (' ', '-', '+'):
31 if line[0] in (' ', '-', '+'):
32 # diff line - don't check for header pattern in there
32 # diff line - don't check for header pattern in there
33 return False
33 return False
34 l = line.split(': ', 1)
34 l = line.split(': ', 1)
35 return len(l) == 2 and ' ' not in l[0]
35 return len(l) == 2 and ' ' not in l[0]
36
36
37 def chunk(lines):
37 def chunk(lines):
38 return cStringIO.StringIO(''.join(lines))
38 return cStringIO.StringIO(''.join(lines))
39
39
40 def hgsplit(stream, cur):
40 def hgsplit(stream, cur):
41 inheader = True
41 inheader = True
42
42
43 for line in stream:
43 for line in stream:
44 if not line.strip():
44 if not line.strip():
45 inheader = False
45 inheader = False
46 if not inheader and line.startswith('# HG changeset patch'):
46 if not inheader and line.startswith('# HG changeset patch'):
47 yield chunk(cur)
47 yield chunk(cur)
48 cur = []
48 cur = []
49 inheader = True
49 inheader = True
50
50
51 cur.append(line)
51 cur.append(line)
52
52
53 if cur:
53 if cur:
54 yield chunk(cur)
54 yield chunk(cur)
55
55
56 def mboxsplit(stream, cur):
56 def mboxsplit(stream, cur):
57 for line in stream:
57 for line in stream:
58 if line.startswith('From '):
58 if line.startswith('From '):
59 for c in split(chunk(cur[1:])):
59 for c in split(chunk(cur[1:])):
60 yield c
60 yield c
61 cur = []
61 cur = []
62
62
63 cur.append(line)
63 cur.append(line)
64
64
65 if cur:
65 if cur:
66 for c in split(chunk(cur[1:])):
66 for c in split(chunk(cur[1:])):
67 yield c
67 yield c
68
68
69 def mimesplit(stream, cur):
69 def mimesplit(stream, cur):
70 def msgfp(m):
70 def msgfp(m):
71 fp = cStringIO.StringIO()
71 fp = cStringIO.StringIO()
72 g = email.Generator.Generator(fp, mangle_from_=False)
72 g = email.Generator.Generator(fp, mangle_from_=False)
73 g.flatten(m)
73 g.flatten(m)
74 fp.seek(0)
74 fp.seek(0)
75 return fp
75 return fp
76
76
77 for line in stream:
77 for line in stream:
78 cur.append(line)
78 cur.append(line)
79 c = chunk(cur)
79 c = chunk(cur)
80
80
81 m = email.Parser.Parser().parse(c)
81 m = email.Parser.Parser().parse(c)
82 if not m.is_multipart():
82 if not m.is_multipart():
83 yield msgfp(m)
83 yield msgfp(m)
84 else:
84 else:
85 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
85 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
86 for part in m.walk():
86 for part in m.walk():
87 ct = part.get_content_type()
87 ct = part.get_content_type()
88 if ct not in ok_types:
88 if ct not in ok_types:
89 continue
89 continue
90 yield msgfp(part)
90 yield msgfp(part)
91
91
92 def headersplit(stream, cur):
92 def headersplit(stream, cur):
93 inheader = False
93 inheader = False
94
94
95 for line in stream:
95 for line in stream:
96 if not inheader and isheader(line, inheader):
96 if not inheader and isheader(line, inheader):
97 yield chunk(cur)
97 yield chunk(cur)
98 cur = []
98 cur = []
99 inheader = True
99 inheader = True
100 if inheader and not isheader(line, inheader):
100 if inheader and not isheader(line, inheader):
101 inheader = False
101 inheader = False
102
102
103 cur.append(line)
103 cur.append(line)
104
104
105 if cur:
105 if cur:
106 yield chunk(cur)
106 yield chunk(cur)
107
107
108 def remainder(cur):
108 def remainder(cur):
109 yield chunk(cur)
109 yield chunk(cur)
110
110
111 class fiter(object):
111 class fiter(object):
112 def __init__(self, fp):
112 def __init__(self, fp):
113 self.fp = fp
113 self.fp = fp
114
114
115 def __iter__(self):
115 def __iter__(self):
116 return self
116 return self
117
117
118 def next(self):
118 def next(self):
119 l = self.fp.readline()
119 l = self.fp.readline()
120 if not l:
120 if not l:
121 raise StopIteration
121 raise StopIteration
122 return l
122 return l
123
123
124 inheader = False
124 inheader = False
125 cur = []
125 cur = []
126
126
127 mimeheaders = ['content-type']
127 mimeheaders = ['content-type']
128
128
129 if not util.safehasattr(stream, 'next'):
129 if not util.safehasattr(stream, 'next'):
130 # http responses, for example, have readline but not next
130 # http responses, for example, have readline but not next
131 stream = fiter(stream)
131 stream = fiter(stream)
132
132
133 for line in stream:
133 for line in stream:
134 cur.append(line)
134 cur.append(line)
135 if line.startswith('# HG changeset patch'):
135 if line.startswith('# HG changeset patch'):
136 return hgsplit(stream, cur)
136 return hgsplit(stream, cur)
137 elif line.startswith('From '):
137 elif line.startswith('From '):
138 return mboxsplit(stream, cur)
138 return mboxsplit(stream, cur)
139 elif isheader(line, inheader):
139 elif isheader(line, inheader):
140 inheader = True
140 inheader = True
141 if line.split(':', 1)[0].lower() in mimeheaders:
141 if line.split(':', 1)[0].lower() in mimeheaders:
142 # let email parser handle this
142 # let email parser handle this
143 return mimesplit(stream, cur)
143 return mimesplit(stream, cur)
144 elif line.startswith('--- ') and inheader:
144 elif line.startswith('--- ') and inheader:
145 # No evil headers seen by diff start, split by hand
145 # No evil headers seen by diff start, split by hand
146 return headersplit(stream, cur)
146 return headersplit(stream, cur)
147 # Not enough info, keep reading
147 # Not enough info, keep reading
148
148
149 # if we are here, we have a very plain patch
149 # if we are here, we have a very plain patch
150 return remainder(cur)
150 return remainder(cur)
151
151
152 def extract(ui, fileobj):
152 def extract(ui, fileobj):
153 '''extract patch from data read from fileobj.
153 '''extract patch from data read from fileobj.
154
154
155 patch can be a normal patch or contained in an email message.
155 patch can be a normal patch or contained in an email message.
156
156
157 return tuple (filename, message, user, date, branch, node, p1, p2).
157 return tuple (filename, message, user, date, branch, node, p1, p2).
158 Any item in the returned tuple can be None. If filename is None,
158 Any item in the returned tuple can be None. If filename is None,
159 fileobj did not contain a patch. Caller must unlink filename when done.'''
159 fileobj did not contain a patch. Caller must unlink filename when done.'''
160
160
161 # attempt to detect the start of a patch
161 # attempt to detect the start of a patch
162 # (this heuristic is borrowed from quilt)
162 # (this heuristic is borrowed from quilt)
163 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
163 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
164 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
164 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
165 r'---[ \t].*?^\+\+\+[ \t]|'
165 r'---[ \t].*?^\+\+\+[ \t]|'
166 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
166 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
167
167
168 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
168 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
169 tmpfp = os.fdopen(fd, 'w')
169 tmpfp = os.fdopen(fd, 'w')
170 try:
170 try:
171 msg = email.Parser.Parser().parse(fileobj)
171 msg = email.Parser.Parser().parse(fileobj)
172
172
173 subject = msg['Subject']
173 subject = msg['Subject']
174 user = msg['From']
174 user = msg['From']
175 if not subject and not user:
175 if not subject and not user:
176 # Not an email, restore parsed headers if any
176 # Not an email, restore parsed headers if any
177 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
177 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
178
178
179 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
179 gitsendmail = 'git-send-email' in msg.get('X-Mailer', '')
180 # should try to parse msg['Date']
180 # should try to parse msg['Date']
181 date = None
181 date = None
182 nodeid = None
182 nodeid = None
183 branch = None
183 branch = None
184 parents = []
184 parents = []
185
185
186 if subject:
186 if subject:
187 if subject.startswith('[PATCH'):
187 if subject.startswith('[PATCH'):
188 pend = subject.find(']')
188 pend = subject.find(']')
189 if pend >= 0:
189 if pend >= 0:
190 subject = subject[pend + 1:].lstrip()
190 subject = subject[pend + 1:].lstrip()
191 subject = re.sub(r'\n[ \t]+', ' ', subject)
191 subject = re.sub(r'\n[ \t]+', ' ', subject)
192 ui.debug('Subject: %s\n' % subject)
192 ui.debug('Subject: %s\n' % subject)
193 if user:
193 if user:
194 ui.debug('From: %s\n' % user)
194 ui.debug('From: %s\n' % user)
195 diffs_seen = 0
195 diffs_seen = 0
196 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
196 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
197 message = ''
197 message = ''
198 for part in msg.walk():
198 for part in msg.walk():
199 content_type = part.get_content_type()
199 content_type = part.get_content_type()
200 ui.debug('Content-Type: %s\n' % content_type)
200 ui.debug('Content-Type: %s\n' % content_type)
201 if content_type not in ok_types:
201 if content_type not in ok_types:
202 continue
202 continue
203 payload = part.get_payload(decode=True)
203 payload = part.get_payload(decode=True)
204 m = diffre.search(payload)
204 m = diffre.search(payload)
205 if m:
205 if m:
206 hgpatch = False
206 hgpatch = False
207 hgpatchheader = False
207 hgpatchheader = False
208 ignoretext = False
208 ignoretext = False
209
209
210 ui.debug('found patch at byte %d\n' % m.start(0))
210 ui.debug('found patch at byte %d\n' % m.start(0))
211 diffs_seen += 1
211 diffs_seen += 1
212 cfp = cStringIO.StringIO()
212 cfp = cStringIO.StringIO()
213 for line in payload[:m.start(0)].splitlines():
213 for line in payload[:m.start(0)].splitlines():
214 if line.startswith('# HG changeset patch') and not hgpatch:
214 if line.startswith('# HG changeset patch') and not hgpatch:
215 ui.debug('patch generated by hg export\n')
215 ui.debug('patch generated by hg export\n')
216 hgpatch = True
216 hgpatch = True
217 hgpatchheader = True
217 hgpatchheader = True
218 # drop earlier commit message content
218 # drop earlier commit message content
219 cfp.seek(0)
219 cfp.seek(0)
220 cfp.truncate()
220 cfp.truncate()
221 subject = None
221 subject = None
222 elif hgpatchheader:
222 elif hgpatchheader:
223 if line.startswith('# User '):
223 if line.startswith('# User '):
224 user = line[7:]
224 user = line[7:]
225 ui.debug('From: %s\n' % user)
225 ui.debug('From: %s\n' % user)
226 elif line.startswith("# Date "):
226 elif line.startswith("# Date "):
227 date = line[7:]
227 date = line[7:]
228 elif line.startswith("# Branch "):
228 elif line.startswith("# Branch "):
229 branch = line[9:]
229 branch = line[9:]
230 elif line.startswith("# Node ID "):
230 elif line.startswith("# Node ID "):
231 nodeid = line[10:]
231 nodeid = line[10:]
232 elif line.startswith("# Parent "):
232 elif line.startswith("# Parent "):
233 parents.append(line[9:].lstrip())
233 parents.append(line[9:].lstrip())
234 elif not line.startswith("# "):
234 elif not line.startswith("# "):
235 hgpatchheader = False
235 hgpatchheader = False
236 elif line == '---' and gitsendmail:
236 elif line == '---' and gitsendmail:
237 ignoretext = True
237 ignoretext = True
238 if not hgpatchheader and not ignoretext:
238 if not hgpatchheader and not ignoretext:
239 cfp.write(line)
239 cfp.write(line)
240 cfp.write('\n')
240 cfp.write('\n')
241 message = cfp.getvalue()
241 message = cfp.getvalue()
242 if tmpfp:
242 if tmpfp:
243 tmpfp.write(payload)
243 tmpfp.write(payload)
244 if not payload.endswith('\n'):
244 if not payload.endswith('\n'):
245 tmpfp.write('\n')
245 tmpfp.write('\n')
246 elif not diffs_seen and message and content_type == 'text/plain':
246 elif not diffs_seen and message and content_type == 'text/plain':
247 message += '\n' + payload
247 message += '\n' + payload
248 except: # re-raises
248 except: # re-raises
249 tmpfp.close()
249 tmpfp.close()
250 os.unlink(tmpname)
250 os.unlink(tmpname)
251 raise
251 raise
252
252
253 if subject and not message.startswith(subject):
253 if subject and not message.startswith(subject):
254 message = '%s\n%s' % (subject, message)
254 message = '%s\n%s' % (subject, message)
255 tmpfp.close()
255 tmpfp.close()
256 if not diffs_seen:
256 if not diffs_seen:
257 os.unlink(tmpname)
257 os.unlink(tmpname)
258 return None, message, user, date, branch, None, None, None
258 return None, message, user, date, branch, None, None, None
259 p1 = parents and parents.pop(0) or None
259 p1 = parents and parents.pop(0) or None
260 p2 = parents and parents.pop(0) or None
260 p2 = parents and parents.pop(0) or None
261 return tmpname, message, user, date, branch, nodeid, p1, p2
261 return tmpname, message, user, date, branch, nodeid, p1, p2
262
262
263 class patchmeta(object):
263 class patchmeta(object):
264 """Patched file metadata
264 """Patched file metadata
265
265
266 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
266 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
267 or COPY. 'path' is patched file path. 'oldpath' is set to the
267 or COPY. 'path' is patched file path. 'oldpath' is set to the
268 origin file when 'op' is either COPY or RENAME, None otherwise. If
268 origin file when 'op' is either COPY or RENAME, None otherwise. If
269 file mode is changed, 'mode' is a tuple (islink, isexec) where
269 file mode is changed, 'mode' is a tuple (islink, isexec) where
270 'islink' is True if the file is a symlink and 'isexec' is True if
270 'islink' is True if the file is a symlink and 'isexec' is True if
271 the file is executable. Otherwise, 'mode' is None.
271 the file is executable. Otherwise, 'mode' is None.
272 """
272 """
273 def __init__(self, path):
273 def __init__(self, path):
274 self.path = path
274 self.path = path
275 self.oldpath = None
275 self.oldpath = None
276 self.mode = None
276 self.mode = None
277 self.op = 'MODIFY'
277 self.op = 'MODIFY'
278 self.binary = False
278 self.binary = False
279
279
280 def setmode(self, mode):
280 def setmode(self, mode):
281 islink = mode & 020000
281 islink = mode & 020000
282 isexec = mode & 0100
282 isexec = mode & 0100
283 self.mode = (islink, isexec)
283 self.mode = (islink, isexec)
284
284
285 def copy(self):
285 def copy(self):
286 other = patchmeta(self.path)
286 other = patchmeta(self.path)
287 other.oldpath = self.oldpath
287 other.oldpath = self.oldpath
288 other.mode = self.mode
288 other.mode = self.mode
289 other.op = self.op
289 other.op = self.op
290 other.binary = self.binary
290 other.binary = self.binary
291 return other
291 return other
292
292
293 def _ispatchinga(self, afile):
293 def _ispatchinga(self, afile):
294 if afile == '/dev/null':
294 if afile == '/dev/null':
295 return self.op == 'ADD'
295 return self.op == 'ADD'
296 return afile == 'a/' + (self.oldpath or self.path)
296 return afile == 'a/' + (self.oldpath or self.path)
297
297
298 def _ispatchingb(self, bfile):
298 def _ispatchingb(self, bfile):
299 if bfile == '/dev/null':
299 if bfile == '/dev/null':
300 return self.op == 'DELETE'
300 return self.op == 'DELETE'
301 return bfile == 'b/' + self.path
301 return bfile == 'b/' + self.path
302
302
303 def ispatching(self, afile, bfile):
303 def ispatching(self, afile, bfile):
304 return self._ispatchinga(afile) and self._ispatchingb(bfile)
304 return self._ispatchinga(afile) and self._ispatchingb(bfile)
305
305
306 def __repr__(self):
306 def __repr__(self):
307 return "<patchmeta %s %r>" % (self.op, self.path)
307 return "<patchmeta %s %r>" % (self.op, self.path)
308
308
309 def readgitpatch(lr):
309 def readgitpatch(lr):
310 """extract git-style metadata about patches from <patchname>"""
310 """extract git-style metadata about patches from <patchname>"""
311
311
312 # Filter patch for git information
312 # Filter patch for git information
313 gp = None
313 gp = None
314 gitpatches = []
314 gitpatches = []
315 for line in lr:
315 for line in lr:
316 line = line.rstrip(' \r\n')
316 line = line.rstrip(' \r\n')
317 if line.startswith('diff --git a/'):
317 if line.startswith('diff --git a/'):
318 m = gitre.match(line)
318 m = gitre.match(line)
319 if m:
319 if m:
320 if gp:
320 if gp:
321 gitpatches.append(gp)
321 gitpatches.append(gp)
322 dst = m.group(2)
322 dst = m.group(2)
323 gp = patchmeta(dst)
323 gp = patchmeta(dst)
324 elif gp:
324 elif gp:
325 if line.startswith('--- '):
325 if line.startswith('--- '):
326 gitpatches.append(gp)
326 gitpatches.append(gp)
327 gp = None
327 gp = None
328 continue
328 continue
329 if line.startswith('rename from '):
329 if line.startswith('rename from '):
330 gp.op = 'RENAME'
330 gp.op = 'RENAME'
331 gp.oldpath = line[12:]
331 gp.oldpath = line[12:]
332 elif line.startswith('rename to '):
332 elif line.startswith('rename to '):
333 gp.path = line[10:]
333 gp.path = line[10:]
334 elif line.startswith('copy from '):
334 elif line.startswith('copy from '):
335 gp.op = 'COPY'
335 gp.op = 'COPY'
336 gp.oldpath = line[10:]
336 gp.oldpath = line[10:]
337 elif line.startswith('copy to '):
337 elif line.startswith('copy to '):
338 gp.path = line[8:]
338 gp.path = line[8:]
339 elif line.startswith('deleted file'):
339 elif line.startswith('deleted file'):
340 gp.op = 'DELETE'
340 gp.op = 'DELETE'
341 elif line.startswith('new file mode '):
341 elif line.startswith('new file mode '):
342 gp.op = 'ADD'
342 gp.op = 'ADD'
343 gp.setmode(int(line[-6:], 8))
343 gp.setmode(int(line[-6:], 8))
344 elif line.startswith('new mode '):
344 elif line.startswith('new mode '):
345 gp.setmode(int(line[-6:], 8))
345 gp.setmode(int(line[-6:], 8))
346 elif line.startswith('GIT binary patch'):
346 elif line.startswith('GIT binary patch'):
347 gp.binary = True
347 gp.binary = True
348 if gp:
348 if gp:
349 gitpatches.append(gp)
349 gitpatches.append(gp)
350
350
351 return gitpatches
351 return gitpatches
352
352
353 class linereader(object):
353 class linereader(object):
354 # simple class to allow pushing lines back into the input stream
354 # simple class to allow pushing lines back into the input stream
355 def __init__(self, fp):
355 def __init__(self, fp):
356 self.fp = fp
356 self.fp = fp
357 self.buf = []
357 self.buf = []
358
358
359 def push(self, line):
359 def push(self, line):
360 if line is not None:
360 if line is not None:
361 self.buf.append(line)
361 self.buf.append(line)
362
362
363 def readline(self):
363 def readline(self):
364 if self.buf:
364 if self.buf:
365 l = self.buf[0]
365 l = self.buf[0]
366 del self.buf[0]
366 del self.buf[0]
367 return l
367 return l
368 return self.fp.readline()
368 return self.fp.readline()
369
369
370 def __iter__(self):
370 def __iter__(self):
371 while True:
371 while True:
372 l = self.readline()
372 l = self.readline()
373 if not l:
373 if not l:
374 break
374 break
375 yield l
375 yield l
376
376
377 class abstractbackend(object):
377 class abstractbackend(object):
378 def __init__(self, ui):
378 def __init__(self, ui):
379 self.ui = ui
379 self.ui = ui
380
380
381 def getfile(self, fname):
381 def getfile(self, fname):
382 """Return target file data and flags as a (data, (islink,
382 """Return target file data and flags as a (data, (islink,
383 isexec)) tuple.
383 isexec)) tuple.
384 """
384 """
385 raise NotImplementedError
385 raise NotImplementedError
386
386
387 def setfile(self, fname, data, mode, copysource):
387 def setfile(self, fname, data, mode, copysource):
388 """Write data to target file fname and set its mode. mode is a
388 """Write data to target file fname and set its mode. mode is a
389 (islink, isexec) tuple. If data is None, the file content should
389 (islink, isexec) tuple. If data is None, the file content should
390 be left unchanged. If the file is modified after being copied,
390 be left unchanged. If the file is modified after being copied,
391 copysource is set to the original file name.
391 copysource is set to the original file name.
392 """
392 """
393 raise NotImplementedError
393 raise NotImplementedError
394
394
395 def unlink(self, fname):
395 def unlink(self, fname):
396 """Unlink target file."""
396 """Unlink target file."""
397 raise NotImplementedError
397 raise NotImplementedError
398
398
399 def writerej(self, fname, failed, total, lines):
399 def writerej(self, fname, failed, total, lines):
400 """Write rejected lines for fname. total is the number of hunks
400 """Write rejected lines for fname. total is the number of hunks
401 which failed to apply and total the total number of hunks for this
401 which failed to apply and total the total number of hunks for this
402 files.
402 files.
403 """
403 """
404 pass
404 pass
405
405
406 def exists(self, fname):
406 def exists(self, fname):
407 raise NotImplementedError
407 raise NotImplementedError
408
408
409 class fsbackend(abstractbackend):
409 class fsbackend(abstractbackend):
410 def __init__(self, ui, basedir):
410 def __init__(self, ui, basedir):
411 super(fsbackend, self).__init__(ui)
411 super(fsbackend, self).__init__(ui)
412 self.opener = scmutil.opener(basedir)
412 self.opener = scmutil.opener(basedir)
413
413
414 def _join(self, f):
414 def _join(self, f):
415 return os.path.join(self.opener.base, f)
415 return os.path.join(self.opener.base, f)
416
416
417 def getfile(self, fname):
417 def getfile(self, fname):
418 path = self._join(fname)
418 path = self._join(fname)
419 if os.path.islink(path):
419 if os.path.islink(path):
420 return (os.readlink(path), (True, False))
420 return (os.readlink(path), (True, False))
421 isexec = False
421 isexec = False
422 try:
422 try:
423 isexec = os.lstat(path).st_mode & 0100 != 0
423 isexec = os.lstat(path).st_mode & 0100 != 0
424 except OSError, e:
424 except OSError, e:
425 if e.errno != errno.ENOENT:
425 if e.errno != errno.ENOENT:
426 raise
426 raise
427 return (self.opener.read(fname), (False, isexec))
427 return (self.opener.read(fname), (False, isexec))
428
428
429 def setfile(self, fname, data, mode, copysource):
429 def setfile(self, fname, data, mode, copysource):
430 islink, isexec = mode
430 islink, isexec = mode
431 if data is None:
431 if data is None:
432 util.setflags(self._join(fname), islink, isexec)
432 util.setflags(self._join(fname), islink, isexec)
433 return
433 return
434 if islink:
434 if islink:
435 self.opener.symlink(data, fname)
435 self.opener.symlink(data, fname)
436 else:
436 else:
437 self.opener.write(fname, data)
437 self.opener.write(fname, data)
438 if isexec:
438 if isexec:
439 util.setflags(self._join(fname), False, True)
439 util.setflags(self._join(fname), False, True)
440
440
441 def unlink(self, fname):
441 def unlink(self, fname):
442 util.unlinkpath(self._join(fname), ignoremissing=True)
442 util.unlinkpath(self._join(fname), ignoremissing=True)
443
443
444 def writerej(self, fname, failed, total, lines):
444 def writerej(self, fname, failed, total, lines):
445 fname = fname + ".rej"
445 fname = fname + ".rej"
446 self.ui.warn(
446 self.ui.warn(
447 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
447 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
448 (failed, total, fname))
448 (failed, total, fname))
449 fp = self.opener(fname, 'w')
449 fp = self.opener(fname, 'w')
450 fp.writelines(lines)
450 fp.writelines(lines)
451 fp.close()
451 fp.close()
452
452
453 def exists(self, fname):
453 def exists(self, fname):
454 return os.path.lexists(self._join(fname))
454 return os.path.lexists(self._join(fname))
455
455
456 class workingbackend(fsbackend):
456 class workingbackend(fsbackend):
457 def __init__(self, ui, repo, similarity):
457 def __init__(self, ui, repo, similarity):
458 super(workingbackend, self).__init__(ui, repo.root)
458 super(workingbackend, self).__init__(ui, repo.root)
459 self.repo = repo
459 self.repo = repo
460 self.similarity = similarity
460 self.similarity = similarity
461 self.removed = set()
461 self.removed = set()
462 self.changed = set()
462 self.changed = set()
463 self.copied = []
463 self.copied = []
464
464
465 def _checkknown(self, fname):
465 def _checkknown(self, fname):
466 if self.repo.dirstate[fname] == '?' and self.exists(fname):
466 if self.repo.dirstate[fname] == '?' and self.exists(fname):
467 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
467 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
468
468
469 def setfile(self, fname, data, mode, copysource):
469 def setfile(self, fname, data, mode, copysource):
470 self._checkknown(fname)
470 self._checkknown(fname)
471 super(workingbackend, self).setfile(fname, data, mode, copysource)
471 super(workingbackend, self).setfile(fname, data, mode, copysource)
472 if copysource is not None:
472 if copysource is not None:
473 self.copied.append((copysource, fname))
473 self.copied.append((copysource, fname))
474 self.changed.add(fname)
474 self.changed.add(fname)
475
475
476 def unlink(self, fname):
476 def unlink(self, fname):
477 self._checkknown(fname)
477 self._checkknown(fname)
478 super(workingbackend, self).unlink(fname)
478 super(workingbackend, self).unlink(fname)
479 self.removed.add(fname)
479 self.removed.add(fname)
480 self.changed.add(fname)
480 self.changed.add(fname)
481
481
     def close(self):
         wctx = self.repo[None]
-        addremoved = set(self.changed)
+        changed = set(self.changed)
         for src, dst in self.copied:
             scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
         if self.removed:
             wctx.forget(sorted(self.removed))
         for f in self.removed:
             if f not in self.repo.dirstate:
                 # File was deleted and no longer belongs to the
                 # dirstate, it was probably marked added then
                 # deleted, and should not be considered by
-                # addremove().
-                addremoved.discard(f)
-        if addremoved:
-            cwd = self.repo.getcwd()
-            if cwd:
-                addremoved = [util.pathto(self.repo.root, cwd, f)
-                              for f in addremoved]
-            scmutil.addremove(self.repo, addremoved, similarity=self.similarity)
+                # marktouched().
+                changed.discard(f)
+        if changed:
+            scmutil.marktouched(self.repo, changed, self.similarity)
         return sorted(self.changed)

504 class filestore(object):
500 class filestore(object):
505 def __init__(self, maxsize=None):
501 def __init__(self, maxsize=None):
506 self.opener = None
502 self.opener = None
507 self.files = {}
503 self.files = {}
508 self.created = 0
504 self.created = 0
509 self.maxsize = maxsize
505 self.maxsize = maxsize
510 if self.maxsize is None:
506 if self.maxsize is None:
511 self.maxsize = 4*(2**20)
507 self.maxsize = 4*(2**20)
512 self.size = 0
508 self.size = 0
513 self.data = {}
509 self.data = {}
514
510
515 def setfile(self, fname, data, mode, copied=None):
511 def setfile(self, fname, data, mode, copied=None):
516 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
512 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
517 self.data[fname] = (data, mode, copied)
513 self.data[fname] = (data, mode, copied)
518 self.size += len(data)
514 self.size += len(data)
519 else:
515 else:
520 if self.opener is None:
516 if self.opener is None:
521 root = tempfile.mkdtemp(prefix='hg-patch-')
517 root = tempfile.mkdtemp(prefix='hg-patch-')
522 self.opener = scmutil.opener(root)
518 self.opener = scmutil.opener(root)
523 # Avoid filename issues with these simple names
519 # Avoid filename issues with these simple names
524 fn = str(self.created)
520 fn = str(self.created)
525 self.opener.write(fn, data)
521 self.opener.write(fn, data)
526 self.created += 1
522 self.created += 1
527 self.files[fname] = (fn, mode, copied)
523 self.files[fname] = (fn, mode, copied)
528
524
529 def getfile(self, fname):
525 def getfile(self, fname):
530 if fname in self.data:
526 if fname in self.data:
531 return self.data[fname]
527 return self.data[fname]
532 if not self.opener or fname not in self.files:
528 if not self.opener or fname not in self.files:
533 raise IOError
529 raise IOError
534 fn, mode, copied = self.files[fname]
530 fn, mode, copied = self.files[fname]
535 return self.opener.read(fn), mode, copied
531 return self.opener.read(fn), mode, copied
536
532
537 def close(self):
533 def close(self):
538 if self.opener:
534 if self.opener:
539 shutil.rmtree(self.opener.base)
535 shutil.rmtree(self.opener.base)
540
536
541 class repobackend(abstractbackend):
537 class repobackend(abstractbackend):
542 def __init__(self, ui, repo, ctx, store):
538 def __init__(self, ui, repo, ctx, store):
543 super(repobackend, self).__init__(ui)
539 super(repobackend, self).__init__(ui)
544 self.repo = repo
540 self.repo = repo
545 self.ctx = ctx
541 self.ctx = ctx
546 self.store = store
542 self.store = store
547 self.changed = set()
543 self.changed = set()
548 self.removed = set()
544 self.removed = set()
549 self.copied = {}
545 self.copied = {}
550
546
551 def _checkknown(self, fname):
547 def _checkknown(self, fname):
552 if fname not in self.ctx:
548 if fname not in self.ctx:
553 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
549 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
554
550
555 def getfile(self, fname):
551 def getfile(self, fname):
556 try:
552 try:
557 fctx = self.ctx[fname]
553 fctx = self.ctx[fname]
558 except error.LookupError:
554 except error.LookupError:
559 raise IOError
555 raise IOError
560 flags = fctx.flags()
556 flags = fctx.flags()
561 return fctx.data(), ('l' in flags, 'x' in flags)
557 return fctx.data(), ('l' in flags, 'x' in flags)
562
558
563 def setfile(self, fname, data, mode, copysource):
559 def setfile(self, fname, data, mode, copysource):
564 if copysource:
560 if copysource:
565 self._checkknown(copysource)
561 self._checkknown(copysource)
566 if data is None:
562 if data is None:
567 data = self.ctx[fname].data()
563 data = self.ctx[fname].data()
568 self.store.setfile(fname, data, mode, copysource)
564 self.store.setfile(fname, data, mode, copysource)
569 self.changed.add(fname)
565 self.changed.add(fname)
570 if copysource:
566 if copysource:
571 self.copied[fname] = copysource
567 self.copied[fname] = copysource
572
568
573 def unlink(self, fname):
569 def unlink(self, fname):
574 self._checkknown(fname)
570 self._checkknown(fname)
575 self.removed.add(fname)
571 self.removed.add(fname)
576
572
577 def exists(self, fname):
573 def exists(self, fname):
578 return fname in self.ctx
574 return fname in self.ctx
579
575
580 def close(self):
576 def close(self):
581 return self.changed | self.removed
577 return self.changed | self.removed
582
578
583 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
579 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
584 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
580 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
585 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
581 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
586 eolmodes = ['strict', 'crlf', 'lf', 'auto']
582 eolmodes = ['strict', 'crlf', 'lf', 'auto']
587
583
588 class patchfile(object):
584 class patchfile(object):
589 def __init__(self, ui, gp, backend, store, eolmode='strict'):
585 def __init__(self, ui, gp, backend, store, eolmode='strict'):
590 self.fname = gp.path
586 self.fname = gp.path
591 self.eolmode = eolmode
587 self.eolmode = eolmode
592 self.eol = None
588 self.eol = None
593 self.backend = backend
589 self.backend = backend
594 self.ui = ui
590 self.ui = ui
595 self.lines = []
591 self.lines = []
596 self.exists = False
592 self.exists = False
597 self.missing = True
593 self.missing = True
598 self.mode = gp.mode
594 self.mode = gp.mode
599 self.copysource = gp.oldpath
595 self.copysource = gp.oldpath
600 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
596 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
601 self.remove = gp.op == 'DELETE'
597 self.remove = gp.op == 'DELETE'
602 try:
598 try:
603 if self.copysource is None:
599 if self.copysource is None:
604 data, mode = backend.getfile(self.fname)
600 data, mode = backend.getfile(self.fname)
605 self.exists = True
601 self.exists = True
606 else:
602 else:
607 data, mode = store.getfile(self.copysource)[:2]
603 data, mode = store.getfile(self.copysource)[:2]
608 self.exists = backend.exists(self.fname)
604 self.exists = backend.exists(self.fname)
609 self.missing = False
605 self.missing = False
610 if data:
606 if data:
611 self.lines = mdiff.splitnewlines(data)
607 self.lines = mdiff.splitnewlines(data)
612 if self.mode is None:
608 if self.mode is None:
613 self.mode = mode
609 self.mode = mode
614 if self.lines:
610 if self.lines:
615 # Normalize line endings
611 # Normalize line endings
616 if self.lines[0].endswith('\r\n'):
612 if self.lines[0].endswith('\r\n'):
617 self.eol = '\r\n'
613 self.eol = '\r\n'
618 elif self.lines[0].endswith('\n'):
614 elif self.lines[0].endswith('\n'):
619 self.eol = '\n'
615 self.eol = '\n'
620 if eolmode != 'strict':
616 if eolmode != 'strict':
621 nlines = []
617 nlines = []
622 for l in self.lines:
618 for l in self.lines:
623 if l.endswith('\r\n'):
619 if l.endswith('\r\n'):
624 l = l[:-2] + '\n'
620 l = l[:-2] + '\n'
625 nlines.append(l)
621 nlines.append(l)
626 self.lines = nlines
622 self.lines = nlines
627 except IOError:
623 except IOError:
628 if self.create:
624 if self.create:
629 self.missing = False
625 self.missing = False
630 if self.mode is None:
626 if self.mode is None:
631 self.mode = (False, False)
627 self.mode = (False, False)
632 if self.missing:
628 if self.missing:
633 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
629 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
634
630
635 self.hash = {}
631 self.hash = {}
636 self.dirty = 0
632 self.dirty = 0
637 self.offset = 0
633 self.offset = 0
638 self.skew = 0
634 self.skew = 0
639 self.rej = []
635 self.rej = []
640 self.fileprinted = False
636 self.fileprinted = False
641 self.printfile(False)
637 self.printfile(False)
642 self.hunks = 0
638 self.hunks = 0
643
639
644 def writelines(self, fname, lines, mode):
640 def writelines(self, fname, lines, mode):
645 if self.eolmode == 'auto':
641 if self.eolmode == 'auto':
646 eol = self.eol
642 eol = self.eol
647 elif self.eolmode == 'crlf':
643 elif self.eolmode == 'crlf':
648 eol = '\r\n'
644 eol = '\r\n'
649 else:
645 else:
650 eol = '\n'
646 eol = '\n'
651
647
652 if self.eolmode != 'strict' and eol and eol != '\n':
648 if self.eolmode != 'strict' and eol and eol != '\n':
653 rawlines = []
649 rawlines = []
654 for l in lines:
650 for l in lines:
655 if l and l[-1] == '\n':
651 if l and l[-1] == '\n':
656 l = l[:-1] + eol
652 l = l[:-1] + eol
657 rawlines.append(l)
653 rawlines.append(l)
658 lines = rawlines
654 lines = rawlines
659
655
660 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
656 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
661
657
662 def printfile(self, warn):
658 def printfile(self, warn):
663 if self.fileprinted:
659 if self.fileprinted:
664 return
660 return
665 if warn or self.ui.verbose:
661 if warn or self.ui.verbose:
666 self.fileprinted = True
662 self.fileprinted = True
667 s = _("patching file %s\n") % self.fname
663 s = _("patching file %s\n") % self.fname
668 if warn:
664 if warn:
669 self.ui.warn(s)
665 self.ui.warn(s)
670 else:
666 else:
671 self.ui.note(s)
667 self.ui.note(s)
672
668
673
669
674 def findlines(self, l, linenum):
670 def findlines(self, l, linenum):
675 # looks through the hash and finds candidate lines. The
671 # looks through the hash and finds candidate lines. The
676 # result is a list of line numbers sorted based on distance
672 # result is a list of line numbers sorted based on distance
677 # from linenum
673 # from linenum
678
674
679 cand = self.hash.get(l, [])
675 cand = self.hash.get(l, [])
680 if len(cand) > 1:
676 if len(cand) > 1:
681 # resort our list of potentials forward then back.
677 # resort our list of potentials forward then back.
682 cand.sort(key=lambda x: abs(x - linenum))
678 cand.sort(key=lambda x: abs(x - linenum))
683 return cand
679 return cand
684
680
685 def write_rej(self):
681 def write_rej(self):
686 # our rejects are a little different from patch(1). This always
682 # our rejects are a little different from patch(1). This always
687 # creates rejects in the same form as the original patch. A file
683 # creates rejects in the same form as the original patch. A file
688 # header is inserted so that you can run the reject through patch again
684 # header is inserted so that you can run the reject through patch again
689 # without having to type the filename.
685 # without having to type the filename.
690 if not self.rej:
686 if not self.rej:
691 return
687 return
692 base = os.path.basename(self.fname)
688 base = os.path.basename(self.fname)
693 lines = ["--- %s\n+++ %s\n" % (base, base)]
689 lines = ["--- %s\n+++ %s\n" % (base, base)]
694 for x in self.rej:
690 for x in self.rej:
695 for l in x.hunk:
691 for l in x.hunk:
696 lines.append(l)
692 lines.append(l)
697 if l[-1] != '\n':
693 if l[-1] != '\n':
698 lines.append("\n\ No newline at end of file\n")
694 lines.append("\n\ No newline at end of file\n")
699 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
695 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
700
696
701 def apply(self, h):
697 def apply(self, h):
702 if not h.complete():
698 if not h.complete():
703 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
699 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
704 (h.number, h.desc, len(h.a), h.lena, len(h.b),
700 (h.number, h.desc, len(h.a), h.lena, len(h.b),
705 h.lenb))
701 h.lenb))
706
702
707 self.hunks += 1
703 self.hunks += 1
708
704
709 if self.missing:
705 if self.missing:
710 self.rej.append(h)
706 self.rej.append(h)
711 return -1
707 return -1
712
708
713 if self.exists and self.create:
709 if self.exists and self.create:
714 if self.copysource:
710 if self.copysource:
715 self.ui.warn(_("cannot create %s: destination already "
711 self.ui.warn(_("cannot create %s: destination already "
716 "exists\n" % self.fname))
712 "exists\n" % self.fname))
717 else:
713 else:
718 self.ui.warn(_("file %s already exists\n") % self.fname)
714 self.ui.warn(_("file %s already exists\n") % self.fname)
719 self.rej.append(h)
715 self.rej.append(h)
720 return -1
716 return -1
721
717
722 if isinstance(h, binhunk):
718 if isinstance(h, binhunk):
723 if self.remove:
719 if self.remove:
724 self.backend.unlink(self.fname)
720 self.backend.unlink(self.fname)
725 else:
721 else:
726 self.lines[:] = h.new()
722 self.lines[:] = h.new()
727 self.offset += len(h.new())
723 self.offset += len(h.new())
728 self.dirty = True
724 self.dirty = True
729 return 0
725 return 0
730
726
731 horig = h
727 horig = h
732 if (self.eolmode in ('crlf', 'lf')
728 if (self.eolmode in ('crlf', 'lf')
733 or self.eolmode == 'auto' and self.eol):
729 or self.eolmode == 'auto' and self.eol):
734 # If new eols are going to be normalized, then normalize
730 # If new eols are going to be normalized, then normalize
735 # hunk data before patching. Otherwise, preserve input
731 # hunk data before patching. Otherwise, preserve input
736 # line-endings.
732 # line-endings.
737 h = h.getnormalized()
733 h = h.getnormalized()
738
734
739 # fast case first, no offsets, no fuzz
735 # fast case first, no offsets, no fuzz
740 old, oldstart, new, newstart = h.fuzzit(0, False)
736 old, oldstart, new, newstart = h.fuzzit(0, False)
741 oldstart += self.offset
737 oldstart += self.offset
742 orig_start = oldstart
738 orig_start = oldstart
743 # if there's skew we want to emit the "(offset %d lines)" even
739 # if there's skew we want to emit the "(offset %d lines)" even
744 # when the hunk cleanly applies at start + skew, so skip the
740 # when the hunk cleanly applies at start + skew, so skip the
745 # fast case code
741 # fast case code
746 if (self.skew == 0 and
742 if (self.skew == 0 and
747 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
743 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
748 if self.remove:
744 if self.remove:
749 self.backend.unlink(self.fname)
745 self.backend.unlink(self.fname)
750 else:
746 else:
751 self.lines[oldstart:oldstart + len(old)] = new
747 self.lines[oldstart:oldstart + len(old)] = new
752 self.offset += len(new) - len(old)
748 self.offset += len(new) - len(old)
753 self.dirty = True
749 self.dirty = True
754 return 0
750 return 0
755
751
756 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
752 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
757 self.hash = {}
753 self.hash = {}
758 for x, s in enumerate(self.lines):
754 for x, s in enumerate(self.lines):
759 self.hash.setdefault(s, []).append(x)
755 self.hash.setdefault(s, []).append(x)
760
756
761 for fuzzlen in xrange(3):
757 for fuzzlen in xrange(3):
762 for toponly in [True, False]:
758 for toponly in [True, False]:
763 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
759 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
764 oldstart = oldstart + self.offset + self.skew
760 oldstart = oldstart + self.offset + self.skew
765 oldstart = min(oldstart, len(self.lines))
761 oldstart = min(oldstart, len(self.lines))
766 if old:
762 if old:
767 cand = self.findlines(old[0][1:], oldstart)
763 cand = self.findlines(old[0][1:], oldstart)
768 else:
764 else:
769 # Only adding lines with no or fuzzed context, just
765 # Only adding lines with no or fuzzed context, just
770 # take the skew in account
766 # take the skew in account
771 cand = [oldstart]
767 cand = [oldstart]
772
768
773 for l in cand:
769 for l in cand:
774 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
770 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
775 self.lines[l : l + len(old)] = new
771 self.lines[l : l + len(old)] = new
776 self.offset += len(new) - len(old)
772 self.offset += len(new) - len(old)
777 self.skew = l - orig_start
773 self.skew = l - orig_start
778 self.dirty = True
774 self.dirty = True
779 offset = l - orig_start - fuzzlen
775 offset = l - orig_start - fuzzlen
780 if fuzzlen:
776 if fuzzlen:
781 msg = _("Hunk #%d succeeded at %d "
777 msg = _("Hunk #%d succeeded at %d "
782 "with fuzz %d "
778 "with fuzz %d "
783 "(offset %d lines).\n")
779 "(offset %d lines).\n")
784 self.printfile(True)
780 self.printfile(True)
785 self.ui.warn(msg %
781 self.ui.warn(msg %
786 (h.number, l + 1, fuzzlen, offset))
782 (h.number, l + 1, fuzzlen, offset))
787 else:
783 else:
788 msg = _("Hunk #%d succeeded at %d "
784 msg = _("Hunk #%d succeeded at %d "
789 "(offset %d lines).\n")
785 "(offset %d lines).\n")
790 self.ui.note(msg % (h.number, l + 1, offset))
786 self.ui.note(msg % (h.number, l + 1, offset))
791 return fuzzlen
787 return fuzzlen
792 self.printfile(True)
788 self.printfile(True)
793 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
789 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
794 self.rej.append(horig)
790 self.rej.append(horig)
795 return -1
791 return -1
796
792
797 def close(self):
793 def close(self):
798 if self.dirty:
794 if self.dirty:
799 self.writelines(self.fname, self.lines, self.mode)
795 self.writelines(self.fname, self.lines, self.mode)
800 self.write_rej()
796 self.write_rej()
801 return len(self.rej)
797 return len(self.rej)
802
798
803 class hunk(object):
799 class hunk(object):
804 def __init__(self, desc, num, lr, context):
800 def __init__(self, desc, num, lr, context):
805 self.number = num
801 self.number = num
806 self.desc = desc
802 self.desc = desc
807 self.hunk = [desc]
803 self.hunk = [desc]
808 self.a = []
804 self.a = []
809 self.b = []
805 self.b = []
810 self.starta = self.lena = None
806 self.starta = self.lena = None
811 self.startb = self.lenb = None
807 self.startb = self.lenb = None
812 if lr is not None:
808 if lr is not None:
813 if context:
809 if context:
814 self.read_context_hunk(lr)
810 self.read_context_hunk(lr)
815 else:
811 else:
816 self.read_unified_hunk(lr)
812 self.read_unified_hunk(lr)
817
813
818 def getnormalized(self):
814 def getnormalized(self):
819 """Return a copy with line endings normalized to LF."""
815 """Return a copy with line endings normalized to LF."""
820
816
821 def normalize(lines):
817 def normalize(lines):
822 nlines = []
818 nlines = []
823 for line in lines:
819 for line in lines:
824 if line.endswith('\r\n'):
820 if line.endswith('\r\n'):
825 line = line[:-2] + '\n'
821 line = line[:-2] + '\n'
826 nlines.append(line)
822 nlines.append(line)
827 return nlines
823 return nlines
828
824
829 # Dummy object, it is rebuilt manually
825 # Dummy object, it is rebuilt manually
830 nh = hunk(self.desc, self.number, None, None)
826 nh = hunk(self.desc, self.number, None, None)
831 nh.number = self.number
827 nh.number = self.number
832 nh.desc = self.desc
828 nh.desc = self.desc
833 nh.hunk = self.hunk
829 nh.hunk = self.hunk
834 nh.a = normalize(self.a)
830 nh.a = normalize(self.a)
835 nh.b = normalize(self.b)
831 nh.b = normalize(self.b)
836 nh.starta = self.starta
832 nh.starta = self.starta
837 nh.startb = self.startb
833 nh.startb = self.startb
838 nh.lena = self.lena
834 nh.lena = self.lena
839 nh.lenb = self.lenb
835 nh.lenb = self.lenb
840 return nh
836 return nh
841
837
842 def read_unified_hunk(self, lr):
838 def read_unified_hunk(self, lr):
843 m = unidesc.match(self.desc)
839 m = unidesc.match(self.desc)
844 if not m:
840 if not m:
845 raise PatchError(_("bad hunk #%d") % self.number)
841 raise PatchError(_("bad hunk #%d") % self.number)
846 self.starta, self.lena, self.startb, self.lenb = m.groups()
842 self.starta, self.lena, self.startb, self.lenb = m.groups()
847 if self.lena is None:
843 if self.lena is None:
848 self.lena = 1
844 self.lena = 1
849 else:
845 else:
850 self.lena = int(self.lena)
846 self.lena = int(self.lena)
851 if self.lenb is None:
847 if self.lenb is None:
852 self.lenb = 1
848 self.lenb = 1
853 else:
849 else:
854 self.lenb = int(self.lenb)
850 self.lenb = int(self.lenb)
855 self.starta = int(self.starta)
851 self.starta = int(self.starta)
856 self.startb = int(self.startb)
852 self.startb = int(self.startb)
857 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
853 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
858 self.b)
854 self.b)
859 # if we hit eof before finishing out the hunk, the last line will
855 # if we hit eof before finishing out the hunk, the last line will
860 # be zero length. Lets try to fix it up.
856 # be zero length. Lets try to fix it up.
861 while len(self.hunk[-1]) == 0:
857 while len(self.hunk[-1]) == 0:
862 del self.hunk[-1]
858 del self.hunk[-1]
863 del self.a[-1]
859 del self.a[-1]
864 del self.b[-1]
860 del self.b[-1]
865 self.lena -= 1
861 self.lena -= 1
866 self.lenb -= 1
862 self.lenb -= 1
867 self._fixnewline(lr)
863 self._fixnewline(lr)
868
864
869 def read_context_hunk(self, lr):
865 def read_context_hunk(self, lr):
870 self.desc = lr.readline()
866 self.desc = lr.readline()
871 m = contextdesc.match(self.desc)
867 m = contextdesc.match(self.desc)
872 if not m:
868 if not m:
873 raise PatchError(_("bad hunk #%d") % self.number)
869 raise PatchError(_("bad hunk #%d") % self.number)
874 self.starta, aend = m.groups()
870 self.starta, aend = m.groups()
875 self.starta = int(self.starta)
871 self.starta = int(self.starta)
876 if aend is None:
872 if aend is None:
877 aend = self.starta
873 aend = self.starta
878 self.lena = int(aend) - self.starta
874 self.lena = int(aend) - self.starta
879 if self.starta:
875 if self.starta:
880 self.lena += 1
876 self.lena += 1
881 for x in xrange(self.lena):
877 for x in xrange(self.lena):
882 l = lr.readline()
878 l = lr.readline()
883 if l.startswith('---'):
879 if l.startswith('---'):
884 # lines addition, old block is empty
880 # lines addition, old block is empty
885 lr.push(l)
881 lr.push(l)
886 break
882 break
887 s = l[2:]
883 s = l[2:]
888 if l.startswith('- ') or l.startswith('! '):
884 if l.startswith('- ') or l.startswith('! '):
889 u = '-' + s
885 u = '-' + s
890 elif l.startswith(' '):
886 elif l.startswith(' '):
891 u = ' ' + s
887 u = ' ' + s
892 else:
888 else:
893 raise PatchError(_("bad hunk #%d old text line %d") %
889 raise PatchError(_("bad hunk #%d old text line %d") %
894 (self.number, x))
890 (self.number, x))
895 self.a.append(u)
891 self.a.append(u)
896 self.hunk.append(u)
892 self.hunk.append(u)
897
893
898 l = lr.readline()
894 l = lr.readline()
899 if l.startswith('\ '):
895 if l.startswith('\ '):
900 s = self.a[-1][:-1]
896 s = self.a[-1][:-1]
901 self.a[-1] = s
897 self.a[-1] = s
902 self.hunk[-1] = s
898 self.hunk[-1] = s
903 l = lr.readline()
899 l = lr.readline()
904 m = contextdesc.match(l)
900 m = contextdesc.match(l)
905 if not m:
901 if not m:
906 raise PatchError(_("bad hunk #%d") % self.number)
902 raise PatchError(_("bad hunk #%d") % self.number)
907 self.startb, bend = m.groups()
903 self.startb, bend = m.groups()
908 self.startb = int(self.startb)
904 self.startb = int(self.startb)
909 if bend is None:
905 if bend is None:
910 bend = self.startb
906 bend = self.startb
911 self.lenb = int(bend) - self.startb
907 self.lenb = int(bend) - self.startb
912 if self.startb:
908 if self.startb:
913 self.lenb += 1
909 self.lenb += 1
914 hunki = 1
910 hunki = 1
915 for x in xrange(self.lenb):
911 for x in xrange(self.lenb):
916 l = lr.readline()
912 l = lr.readline()
917 if l.startswith('\ '):
913 if l.startswith('\ '):
918 # XXX: the only way to hit this is with an invalid line range.
914 # XXX: the only way to hit this is with an invalid line range.
919 # The no-eol marker is not counted in the line range, but I
915 # The no-eol marker is not counted in the line range, but I
920 # guess there are diff(1) out there which behave differently.
916 # guess there are diff(1) out there which behave differently.
921 s = self.b[-1][:-1]
917 s = self.b[-1][:-1]
922 self.b[-1] = s
918 self.b[-1] = s
923 self.hunk[hunki - 1] = s
919 self.hunk[hunki - 1] = s
924 continue
920 continue
925 if not l:
921 if not l:
926 # line deletions, new block is empty and we hit EOF
922 # line deletions, new block is empty and we hit EOF
927 lr.push(l)
923 lr.push(l)
928 break
924 break
929 s = l[2:]
925 s = l[2:]
930 if l.startswith('+ ') or l.startswith('! '):
926 if l.startswith('+ ') or l.startswith('! '):
931 u = '+' + s
927 u = '+' + s
932 elif l.startswith(' '):
928 elif l.startswith(' '):
933 u = ' ' + s
929 u = ' ' + s
934 elif len(self.b) == 0:
930 elif len(self.b) == 0:
935 # line deletions, new block is empty
931 # line deletions, new block is empty
936 lr.push(l)
932 lr.push(l)
937 break
933 break
938 else:
934 else:
939 raise PatchError(_("bad hunk #%d old text line %d") %
935 raise PatchError(_("bad hunk #%d old text line %d") %
940 (self.number, x))
936 (self.number, x))
941 self.b.append(s)
937 self.b.append(s)
942 while True:
938 while True:
943 if hunki >= len(self.hunk):
939 if hunki >= len(self.hunk):
944 h = ""
940 h = ""
945 else:
941 else:
946 h = self.hunk[hunki]
942 h = self.hunk[hunki]
947 hunki += 1
943 hunki += 1
948 if h == u:
944 if h == u:
949 break
945 break
950 elif h.startswith('-'):
946 elif h.startswith('-'):
951 continue
947 continue
952 else:
948 else:
953 self.hunk.insert(hunki - 1, u)
949 self.hunk.insert(hunki - 1, u)
954 break
950 break
955
951
956 if not self.a:
952 if not self.a:
957 # this happens when lines were only added to the hunk
953 # this happens when lines were only added to the hunk
958 for x in self.hunk:
954 for x in self.hunk:
959 if x.startswith('-') or x.startswith(' '):
955 if x.startswith('-') or x.startswith(' '):
960 self.a.append(x)
956 self.a.append(x)
961 if not self.b:
957 if not self.b:
962 # this happens when lines were only deleted from the hunk
958 # this happens when lines were only deleted from the hunk
963 for x in self.hunk:
959 for x in self.hunk:
964 if x.startswith('+') or x.startswith(' '):
960 if x.startswith('+') or x.startswith(' '):
965 self.b.append(x[1:])
961 self.b.append(x[1:])
966 # @@ -start,len +start,len @@
962 # @@ -start,len +start,len @@
967 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
963 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
968 self.startb, self.lenb)
964 self.startb, self.lenb)
969 self.hunk[0] = self.desc
965 self.hunk[0] = self.desc
970 self._fixnewline(lr)
966 self._fixnewline(lr)
971
967
972 def _fixnewline(self, lr):
968 def _fixnewline(self, lr):
973 l = lr.readline()
969 l = lr.readline()
974 if l.startswith('\ '):
970 if l.startswith('\ '):
975 diffhelpers.fix_newline(self.hunk, self.a, self.b)
971 diffhelpers.fix_newline(self.hunk, self.a, self.b)
976 else:
972 else:
977 lr.push(l)
973 lr.push(l)
978
974
979 def complete(self):
975 def complete(self):
980 return len(self.a) == self.lena and len(self.b) == self.lenb
976 return len(self.a) == self.lena and len(self.b) == self.lenb
981
977
982 def _fuzzit(self, old, new, fuzz, toponly):
978 def _fuzzit(self, old, new, fuzz, toponly):
983 # this removes context lines from the top and bottom of list 'l'. It
979 # this removes context lines from the top and bottom of list 'l'. It
984 # checks the hunk to make sure only context lines are removed, and then
980 # checks the hunk to make sure only context lines are removed, and then
985 # returns a new shortened list of lines.
981 # returns a new shortened list of lines.
986 fuzz = min(fuzz, len(old))
982 fuzz = min(fuzz, len(old))
987 if fuzz:
983 if fuzz:
988 top = 0
984 top = 0
989 bot = 0
985 bot = 0
990 hlen = len(self.hunk)
986 hlen = len(self.hunk)
991 for x in xrange(hlen - 1):
987 for x in xrange(hlen - 1):
992 # the hunk starts with the @@ line, so use x+1
988 # the hunk starts with the @@ line, so use x+1
993 if self.hunk[x + 1][0] == ' ':
989 if self.hunk[x + 1][0] == ' ':
994 top += 1
990 top += 1
995 else:
991 else:
996 break
992 break
997 if not toponly:
993 if not toponly:
998 for x in xrange(hlen - 1):
994 for x in xrange(hlen - 1):
999 if self.hunk[hlen - bot - 1][0] == ' ':
995 if self.hunk[hlen - bot - 1][0] == ' ':
1000 bot += 1
996 bot += 1
1001 else:
997 else:
1002 break
998 break
1003
999
1004 bot = min(fuzz, bot)
1000 bot = min(fuzz, bot)
1005 top = min(fuzz, top)
1001 top = min(fuzz, top)
1006 return old[top:len(old) - bot], new[top:len(new) - bot], top
1002 return old[top:len(old) - bot], new[top:len(new) - bot], top
1007 return old, new, 0
1003 return old, new, 0
1008
1004
1009 def fuzzit(self, fuzz, toponly):
1005 def fuzzit(self, fuzz, toponly):
1010 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1006 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1011 oldstart = self.starta + top
1007 oldstart = self.starta + top
1012 newstart = self.startb + top
1008 newstart = self.startb + top
1013 # zero length hunk ranges already have their start decremented
1009 # zero length hunk ranges already have their start decremented
1014 if self.lena and oldstart > 0:
1010 if self.lena and oldstart > 0:
1015 oldstart -= 1
1011 oldstart -= 1
1016 if self.lenb and newstart > 0:
1012 if self.lenb and newstart > 0:
1017 newstart -= 1
1013 newstart -= 1
1018 return old, oldstart, new, newstart
1014 return old, oldstart, new, newstart
1019
1015
1020 class binhunk(object):
1016 class binhunk(object):
1021 'A binary patch file. Only understands literals so far.'
1017 'A binary patch file. Only understands literals so far.'
1022 def __init__(self, lr, fname):
1018 def __init__(self, lr, fname):
1023 self.text = None
1019 self.text = None
1024 self.hunk = ['GIT binary patch\n']
1020 self.hunk = ['GIT binary patch\n']
1025 self._fname = fname
1021 self._fname = fname
1026 self._read(lr)
1022 self._read(lr)
1027
1023
1028 def complete(self):
1024 def complete(self):
1029 return self.text is not None
1025 return self.text is not None
1030
1026
1031 def new(self):
1027 def new(self):
1032 return [self.text]
1028 return [self.text]
1033
1029
1034 def _read(self, lr):
1030 def _read(self, lr):
1035 def getline(lr, hunk):
1031 def getline(lr, hunk):
1036 l = lr.readline()
1032 l = lr.readline()
1037 hunk.append(l)
1033 hunk.append(l)
1038 return l.rstrip('\r\n')
1034 return l.rstrip('\r\n')
1039
1035
1040 while True:
1036 while True:
1041 line = getline(lr, self.hunk)
1037 line = getline(lr, self.hunk)
1042 if not line:
1038 if not line:
1043 raise PatchError(_('could not extract "%s" binary data')
1039 raise PatchError(_('could not extract "%s" binary data')
1044 % self._fname)
1040 % self._fname)
1045 if line.startswith('literal '):
1041 if line.startswith('literal '):
1046 break
1042 break
1047 size = int(line[8:].rstrip())
1043 size = int(line[8:].rstrip())
1048 dec = []
1044 dec = []
1049 line = getline(lr, self.hunk)
1045 line = getline(lr, self.hunk)
1050 while len(line) > 1:
1046 while len(line) > 1:
1051 l = line[0]
1047 l = line[0]
1052 if l <= 'Z' and l >= 'A':
1048 if l <= 'Z' and l >= 'A':
1053 l = ord(l) - ord('A') + 1
1049 l = ord(l) - ord('A') + 1
1054 else:
1050 else:
1055 l = ord(l) - ord('a') + 27
1051 l = ord(l) - ord('a') + 27
1056 try:
1052 try:
1057 dec.append(base85.b85decode(line[1:])[:l])
1053 dec.append(base85.b85decode(line[1:])[:l])
1058 except ValueError, e:
1054 except ValueError, e:
1059 raise PatchError(_('could not decode "%s" binary patch: %s')
1055 raise PatchError(_('could not decode "%s" binary patch: %s')
1060 % (self._fname, str(e)))
1056 % (self._fname, str(e)))
1061 line = getline(lr, self.hunk)
1057 line = getline(lr, self.hunk)
1062 text = zlib.decompress(''.join(dec))
1058 text = zlib.decompress(''.join(dec))
1063 if len(text) != size:
1059 if len(text) != size:
1064 raise PatchError(_('"%s" length is %d bytes, should be %d')
1060 raise PatchError(_('"%s" length is %d bytes, should be %d')
1065 % (self._fname, len(text), size))
1061 % (self._fname, len(text), size))
1066 self.text = text
1062 self.text = text
1067
1063
1068 def parsefilename(str):
1064 def parsefilename(str):
1069 # --- filename \t|space stuff
1065 # --- filename \t|space stuff
1070 s = str[4:].rstrip('\r\n')
1066 s = str[4:].rstrip('\r\n')
1071 i = s.find('\t')
1067 i = s.find('\t')
1072 if i < 0:
1068 if i < 0:
1073 i = s.find(' ')
1069 i = s.find(' ')
1074 if i < 0:
1070 if i < 0:
1075 return s
1071 return s
1076 return s[:i]
1072 return s[:i]
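A quick illustration of parsefilename() above (hypothetical input values, not taken from the changeset): it drops the four-character '--- '/'+++ ' prefix and keeps everything up to the first tab or space.

# Illustration only (not part of the changeset):
assert parsefilename('--- a/foo.c\t2013-01-01 00:00:00\n') == 'a/foo.c'
assert parsefilename('+++ b/foo.c\n') == 'b/foo.c'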
1077
1073
1078 def pathstrip(path, strip):
1074 def pathstrip(path, strip):
1079 pathlen = len(path)
1075 pathlen = len(path)
1080 i = 0
1076 i = 0
1081 if strip == 0:
1077 if strip == 0:
1082 return '', path.rstrip()
1078 return '', path.rstrip()
1083 count = strip
1079 count = strip
1084 while count > 0:
1080 while count > 0:
1085 i = path.find('/', i)
1081 i = path.find('/', i)
1086 if i == -1:
1082 if i == -1:
1087 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1083 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1088 (count, strip, path))
1084 (count, strip, path))
1089 i += 1
1085 i += 1
1090 # consume '//' in the path
1086 # consume '//' in the path
1091 while i < pathlen - 1 and path[i] == '/':
1087 while i < pathlen - 1 and path[i] == '/':
1092 i += 1
1088 i += 1
1093 count -= 1
1089 count -= 1
1094 return path[:i].lstrip(), path[i:].rstrip()
1090 return path[:i].lstrip(), path[i:].rstrip()
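pathstrip() mirrors the -p/--strip handling of patch(1): it consumes 'strip' leading path components and returns the stripped prefix alongside the remainder. A small sketch of the behaviour, with hypothetical paths:

# Illustration only (not part of the changeset):
assert pathstrip('a/b/c.txt', 0) == ('', 'a/b/c.txt')
assert pathstrip('a/b/c.txt', 1) == ('a/', 'b/c.txt')
assert pathstrip('a//b/c.txt', 2) == ('a//b/', 'c.txt')   # doubled '/' is consumed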
1095
1091
1096 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
1092 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip):
1097 nulla = afile_orig == "/dev/null"
1093 nulla = afile_orig == "/dev/null"
1098 nullb = bfile_orig == "/dev/null"
1094 nullb = bfile_orig == "/dev/null"
1099 create = nulla and hunk.starta == 0 and hunk.lena == 0
1095 create = nulla and hunk.starta == 0 and hunk.lena == 0
1100 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1096 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1101 abase, afile = pathstrip(afile_orig, strip)
1097 abase, afile = pathstrip(afile_orig, strip)
1102 gooda = not nulla and backend.exists(afile)
1098 gooda = not nulla and backend.exists(afile)
1103 bbase, bfile = pathstrip(bfile_orig, strip)
1099 bbase, bfile = pathstrip(bfile_orig, strip)
1104 if afile == bfile:
1100 if afile == bfile:
1105 goodb = gooda
1101 goodb = gooda
1106 else:
1102 else:
1107 goodb = not nullb and backend.exists(bfile)
1103 goodb = not nullb and backend.exists(bfile)
1108 missing = not goodb and not gooda and not create
1104 missing = not goodb and not gooda and not create
1109
1105
1110 # some diff programs apparently produce patches where the afile is
1106 # some diff programs apparently produce patches where the afile is
1111 # not /dev/null, but afile starts with bfile
1107 # not /dev/null, but afile starts with bfile
1112 abasedir = afile[:afile.rfind('/') + 1]
1108 abasedir = afile[:afile.rfind('/') + 1]
1113 bbasedir = bfile[:bfile.rfind('/') + 1]
1109 bbasedir = bfile[:bfile.rfind('/') + 1]
1114 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1110 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1115 and hunk.starta == 0 and hunk.lena == 0):
1111 and hunk.starta == 0 and hunk.lena == 0):
1116 create = True
1112 create = True
1117 missing = False
1113 missing = False
1118
1114
1119 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1115 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1120 # diff is between a file and its backup. In this case, the original
1116 # diff is between a file and its backup. In this case, the original
1121 # file should be patched (see original mpatch code).
1117 # file should be patched (see original mpatch code).
1122 isbackup = (abase == bbase and bfile.startswith(afile))
1118 isbackup = (abase == bbase and bfile.startswith(afile))
1123 fname = None
1119 fname = None
1124 if not missing:
1120 if not missing:
1125 if gooda and goodb:
1121 if gooda and goodb:
1126 fname = isbackup and afile or bfile
1122 fname = isbackup and afile or bfile
1127 elif gooda:
1123 elif gooda:
1128 fname = afile
1124 fname = afile
1129
1125
1130 if not fname:
1126 if not fname:
1131 if not nullb:
1127 if not nullb:
1132 fname = isbackup and afile or bfile
1128 fname = isbackup and afile or bfile
1133 elif not nulla:
1129 elif not nulla:
1134 fname = afile
1130 fname = afile
1135 else:
1131 else:
1136 raise PatchError(_("undefined source and destination files"))
1132 raise PatchError(_("undefined source and destination files"))
1137
1133
1138 gp = patchmeta(fname)
1134 gp = patchmeta(fname)
1139 if create:
1135 if create:
1140 gp.op = 'ADD'
1136 gp.op = 'ADD'
1141 elif remove:
1137 elif remove:
1142 gp.op = 'DELETE'
1138 gp.op = 'DELETE'
1143 return gp
1139 return gp
1144
1140
1145 def scangitpatch(lr, firstline):
1141 def scangitpatch(lr, firstline):
1146 """
1142 """
1147 Git patches can emit:
1143 Git patches can emit:
1148 - rename a to b
1144 - rename a to b
1149 - change b
1145 - change b
1150 - copy a to c
1146 - copy a to c
1151 - change c
1147 - change c
1152
1148
1153 We cannot apply this sequence as-is, the renamed 'a' could not be
1149 We cannot apply this sequence as-is, the renamed 'a' could not be
1154 found for it would have been renamed already. And we cannot copy
1150 found for it would have been renamed already. And we cannot copy
1155 from 'b' instead because 'b' would have been changed already. So
1151 from 'b' instead because 'b' would have been changed already. So
1156 we scan the git patch for copy and rename commands so we can
1152 we scan the git patch for copy and rename commands so we can
1157 perform the copies ahead of time.
1153 perform the copies ahead of time.
1158 """
1154 """
1159 pos = 0
1155 pos = 0
1160 try:
1156 try:
1161 pos = lr.fp.tell()
1157 pos = lr.fp.tell()
1162 fp = lr.fp
1158 fp = lr.fp
1163 except IOError:
1159 except IOError:
1164 fp = cStringIO.StringIO(lr.fp.read())
1160 fp = cStringIO.StringIO(lr.fp.read())
1165 gitlr = linereader(fp)
1161 gitlr = linereader(fp)
1166 gitlr.push(firstline)
1162 gitlr.push(firstline)
1167 gitpatches = readgitpatch(gitlr)
1163 gitpatches = readgitpatch(gitlr)
1168 fp.seek(pos)
1164 fp.seek(pos)
1169 return gitpatches
1165 return gitpatches
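The ordering problem described in the docstring can be made concrete with a hypothetical git patch such as the one below: by the time the 'copy from a' block is reached, 'a' has already been renamed away, which is why scangitpatch() collects all copy/rename metadata up front before any hunk is applied.

# Illustration only (not part of the changeset): a sequence that cannot be
# applied strictly top to bottom.
problematic = '''\
diff --git a/a b/b
rename from a
rename to b
diff --git a/a b/c
copy from a
copy to c
'''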
1170
1166
1171 def iterhunks(fp):
1167 def iterhunks(fp):
1172 """Read a patch and yield the following events:
1168 """Read a patch and yield the following events:
1173 - ("file", afile, bfile, firsthunk): select a new target file.
1169 - ("file", afile, bfile, firsthunk): select a new target file.
1174 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1170 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1175 "file" event.
1171 "file" event.
1176 - ("git", gitchanges): current diff is in git format, gitchanges
1172 - ("git", gitchanges): current diff is in git format, gitchanges
1177 maps filenames to gitpatch records. Unique event.
1173 maps filenames to gitpatch records. Unique event.
1178 """
1174 """
1179 afile = ""
1175 afile = ""
1180 bfile = ""
1176 bfile = ""
1181 state = None
1177 state = None
1182 hunknum = 0
1178 hunknum = 0
1183 emitfile = newfile = False
1179 emitfile = newfile = False
1184 gitpatches = None
1180 gitpatches = None
1185
1181
1186 # our states
1182 # our states
1187 BFILE = 1
1183 BFILE = 1
1188 context = None
1184 context = None
1189 lr = linereader(fp)
1185 lr = linereader(fp)
1190
1186
1191 while True:
1187 while True:
1192 x = lr.readline()
1188 x = lr.readline()
1193 if not x:
1189 if not x:
1194 break
1190 break
1195 if state == BFILE and (
1191 if state == BFILE and (
1196 (not context and x[0] == '@')
1192 (not context and x[0] == '@')
1197 or (context is not False and x.startswith('***************'))
1193 or (context is not False and x.startswith('***************'))
1198 or x.startswith('GIT binary patch')):
1194 or x.startswith('GIT binary patch')):
1199 gp = None
1195 gp = None
1200 if (gitpatches and
1196 if (gitpatches and
1201 gitpatches[-1].ispatching(afile, bfile)):
1197 gitpatches[-1].ispatching(afile, bfile)):
1202 gp = gitpatches.pop()
1198 gp = gitpatches.pop()
1203 if x.startswith('GIT binary patch'):
1199 if x.startswith('GIT binary patch'):
1204 h = binhunk(lr, gp.path)
1200 h = binhunk(lr, gp.path)
1205 else:
1201 else:
1206 if context is None and x.startswith('***************'):
1202 if context is None and x.startswith('***************'):
1207 context = True
1203 context = True
1208 h = hunk(x, hunknum + 1, lr, context)
1204 h = hunk(x, hunknum + 1, lr, context)
1209 hunknum += 1
1205 hunknum += 1
1210 if emitfile:
1206 if emitfile:
1211 emitfile = False
1207 emitfile = False
1212 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1208 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1213 yield 'hunk', h
1209 yield 'hunk', h
1214 elif x.startswith('diff --git a/'):
1210 elif x.startswith('diff --git a/'):
1215 m = gitre.match(x.rstrip(' \r\n'))
1211 m = gitre.match(x.rstrip(' \r\n'))
1216 if not m:
1212 if not m:
1217 continue
1213 continue
1218 if gitpatches is None:
1214 if gitpatches is None:
1219 # scan whole input for git metadata
1215 # scan whole input for git metadata
1220 gitpatches = scangitpatch(lr, x)
1216 gitpatches = scangitpatch(lr, x)
1221 yield 'git', [g.copy() for g in gitpatches
1217 yield 'git', [g.copy() for g in gitpatches
1222 if g.op in ('COPY', 'RENAME')]
1218 if g.op in ('COPY', 'RENAME')]
1223 gitpatches.reverse()
1219 gitpatches.reverse()
1224 afile = 'a/' + m.group(1)
1220 afile = 'a/' + m.group(1)
1225 bfile = 'b/' + m.group(2)
1221 bfile = 'b/' + m.group(2)
1226 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1222 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1227 gp = gitpatches.pop()
1223 gp = gitpatches.pop()
1228 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1224 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1229 if not gitpatches:
1225 if not gitpatches:
1230 raise PatchError(_('failed to synchronize metadata for "%s"')
1226 raise PatchError(_('failed to synchronize metadata for "%s"')
1231 % afile[2:])
1227 % afile[2:])
1232 gp = gitpatches[-1]
1228 gp = gitpatches[-1]
1233 newfile = True
1229 newfile = True
1234 elif x.startswith('---'):
1230 elif x.startswith('---'):
1235 # check for a unified diff
1231 # check for a unified diff
1236 l2 = lr.readline()
1232 l2 = lr.readline()
1237 if not l2.startswith('+++'):
1233 if not l2.startswith('+++'):
1238 lr.push(l2)
1234 lr.push(l2)
1239 continue
1235 continue
1240 newfile = True
1236 newfile = True
1241 context = False
1237 context = False
1242 afile = parsefilename(x)
1238 afile = parsefilename(x)
1243 bfile = parsefilename(l2)
1239 bfile = parsefilename(l2)
1244 elif x.startswith('***'):
1240 elif x.startswith('***'):
1245 # check for a context diff
1241 # check for a context diff
1246 l2 = lr.readline()
1242 l2 = lr.readline()
1247 if not l2.startswith('---'):
1243 if not l2.startswith('---'):
1248 lr.push(l2)
1244 lr.push(l2)
1249 continue
1245 continue
1250 l3 = lr.readline()
1246 l3 = lr.readline()
1251 lr.push(l3)
1247 lr.push(l3)
1252 if not l3.startswith("***************"):
1248 if not l3.startswith("***************"):
1253 lr.push(l2)
1249 lr.push(l2)
1254 continue
1250 continue
1255 newfile = True
1251 newfile = True
1256 context = True
1252 context = True
1257 afile = parsefilename(x)
1253 afile = parsefilename(x)
1258 bfile = parsefilename(l2)
1254 bfile = parsefilename(l2)
1259
1255
1260 if newfile:
1256 if newfile:
1261 newfile = False
1257 newfile = False
1262 emitfile = True
1258 emitfile = True
1263 state = BFILE
1259 state = BFILE
1264 hunknum = 0
1260 hunknum = 0
1265
1261
1266 while gitpatches:
1262 while gitpatches:
1267 gp = gitpatches.pop()
1263 gp = gitpatches.pop()
1268 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1264 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
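A minimal sketch (not part of the changeset; it assumes this module is importable as mercurial.patch and that 'example.diff' is a hypothetical patch file) of consuming the event stream iterhunks() yields:

# Illustration only: print one line per target file and per hunk.
from mercurial import patch

def summarize(fp):
    for state, values in patch.iterhunks(fp):
        if state == 'file':
            afile, bfile, firsthunk, gp = values
            print 'file: %s -> %s' % (afile, bfile)
        elif state == 'hunk':
            print '  hunk with %d lines' % len(values.hunk)
        # 'git' events carry copy/rename metadata and are ignored here

summarize(open('example.diff', 'rb'))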
1269
1265
1270 def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
1266 def applydiff(ui, fp, backend, store, strip=1, eolmode='strict'):
1271 """Reads a patch from fp and tries to apply it.
1267 """Reads a patch from fp and tries to apply it.
1272
1268
1273 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1269 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1274 there was any fuzz.
1270 there was any fuzz.
1275
1271
1276 If 'eolmode' is 'strict', the patch content and patched file are
1272 If 'eolmode' is 'strict', the patch content and patched file are
1277 read in binary mode. Otherwise, line endings are ignored when
1273 read in binary mode. Otherwise, line endings are ignored when
1278 patching then normalized according to 'eolmode'.
1274 patching then normalized according to 'eolmode'.
1279 """
1275 """
1280 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1276 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1281 eolmode=eolmode)
1277 eolmode=eolmode)
1282
1278
1283 def _applydiff(ui, fp, patcher, backend, store, strip=1,
1279 def _applydiff(ui, fp, patcher, backend, store, strip=1,
1284 eolmode='strict'):
1280 eolmode='strict'):
1285
1281
1286 def pstrip(p):
1282 def pstrip(p):
1287 return pathstrip(p, strip - 1)[1]
1283 return pathstrip(p, strip - 1)[1]
1288
1284
1289 rejects = 0
1285 rejects = 0
1290 err = 0
1286 err = 0
1291 current_file = None
1287 current_file = None
1292
1288
1293 for state, values in iterhunks(fp):
1289 for state, values in iterhunks(fp):
1294 if state == 'hunk':
1290 if state == 'hunk':
1295 if not current_file:
1291 if not current_file:
1296 continue
1292 continue
1297 ret = current_file.apply(values)
1293 ret = current_file.apply(values)
1298 if ret > 0:
1294 if ret > 0:
1299 err = 1
1295 err = 1
1300 elif state == 'file':
1296 elif state == 'file':
1301 if current_file:
1297 if current_file:
1302 rejects += current_file.close()
1298 rejects += current_file.close()
1303 current_file = None
1299 current_file = None
1304 afile, bfile, first_hunk, gp = values
1300 afile, bfile, first_hunk, gp = values
1305 if gp:
1301 if gp:
1306 gp.path = pstrip(gp.path)
1302 gp.path = pstrip(gp.path)
1307 if gp.oldpath:
1303 if gp.oldpath:
1308 gp.oldpath = pstrip(gp.oldpath)
1304 gp.oldpath = pstrip(gp.oldpath)
1309 else:
1305 else:
1310 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1306 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1311 if gp.op == 'RENAME':
1307 if gp.op == 'RENAME':
1312 backend.unlink(gp.oldpath)
1308 backend.unlink(gp.oldpath)
1313 if not first_hunk:
1309 if not first_hunk:
1314 if gp.op == 'DELETE':
1310 if gp.op == 'DELETE':
1315 backend.unlink(gp.path)
1311 backend.unlink(gp.path)
1316 continue
1312 continue
1317 data, mode = None, None
1313 data, mode = None, None
1318 if gp.op in ('RENAME', 'COPY'):
1314 if gp.op in ('RENAME', 'COPY'):
1319 data, mode = store.getfile(gp.oldpath)[:2]
1315 data, mode = store.getfile(gp.oldpath)[:2]
1320 if gp.mode:
1316 if gp.mode:
1321 mode = gp.mode
1317 mode = gp.mode
1322 if gp.op == 'ADD':
1318 if gp.op == 'ADD':
1323 # Added files without content have no hunk and
1319 # Added files without content have no hunk and
1324 # must be created
1320 # must be created
1325 data = ''
1321 data = ''
1326 if data or mode:
1322 if data or mode:
1327 if (gp.op in ('ADD', 'RENAME', 'COPY')
1323 if (gp.op in ('ADD', 'RENAME', 'COPY')
1328 and backend.exists(gp.path)):
1324 and backend.exists(gp.path)):
1329 raise PatchError(_("cannot create %s: destination "
1325 raise PatchError(_("cannot create %s: destination "
1330 "already exists") % gp.path)
1326 "already exists") % gp.path)
1331 backend.setfile(gp.path, data, mode, gp.oldpath)
1327 backend.setfile(gp.path, data, mode, gp.oldpath)
1332 continue
1328 continue
1333 try:
1329 try:
1334 current_file = patcher(ui, gp, backend, store,
1330 current_file = patcher(ui, gp, backend, store,
1335 eolmode=eolmode)
1331 eolmode=eolmode)
1336 except PatchError, inst:
1332 except PatchError, inst:
1337 ui.warn(str(inst) + '\n')
1333 ui.warn(str(inst) + '\n')
1338 current_file = None
1334 current_file = None
1339 rejects += 1
1335 rejects += 1
1340 continue
1336 continue
1341 elif state == 'git':
1337 elif state == 'git':
1342 for gp in values:
1338 for gp in values:
1343 path = pstrip(gp.oldpath)
1339 path = pstrip(gp.oldpath)
1344 try:
1340 try:
1345 data, mode = backend.getfile(path)
1341 data, mode = backend.getfile(path)
1346 except IOError, e:
1342 except IOError, e:
1347 if e.errno != errno.ENOENT:
1343 if e.errno != errno.ENOENT:
1348 raise
1344 raise
1349 # The error ignored here will trigger a getfile()
1345 # The error ignored here will trigger a getfile()
1350 # error in a place more appropriate for error
1346 # error in a place more appropriate for error
1351 # handling, and will not interrupt the patching
1347 # handling, and will not interrupt the patching
1352 # process.
1348 # process.
1353 else:
1349 else:
1354 store.setfile(path, data, mode)
1350 store.setfile(path, data, mode)
1355 else:
1351 else:
1356 raise util.Abort(_('unsupported parser state: %s') % state)
1352 raise util.Abort(_('unsupported parser state: %s') % state)
1357
1353
1358 if current_file:
1354 if current_file:
1359 rejects += current_file.close()
1355 rejects += current_file.close()
1360
1356
1361 if rejects:
1357 if rejects:
1362 return -1
1358 return -1
1363 return err
1359 return err
1364
1360
1365 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1361 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1366 similarity):
1362 similarity):
1367 """use <patcher> to apply <patchname> to the working directory.
1363 """use <patcher> to apply <patchname> to the working directory.
1368 returns whether patch was applied with fuzz factor."""
1364 returns whether patch was applied with fuzz factor."""
1369
1365
1370 fuzz = False
1366 fuzz = False
1371 args = []
1367 args = []
1372 cwd = repo.root
1368 cwd = repo.root
1373 if cwd:
1369 if cwd:
1374 args.append('-d %s' % util.shellquote(cwd))
1370 args.append('-d %s' % util.shellquote(cwd))
1375 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1371 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1376 util.shellquote(patchname)))
1372 util.shellquote(patchname)))
1377 try:
1373 try:
1378 for line in fp:
1374 for line in fp:
1379 line = line.rstrip()
1375 line = line.rstrip()
1380 ui.note(line + '\n')
1376 ui.note(line + '\n')
1381 if line.startswith('patching file '):
1377 if line.startswith('patching file '):
1382 pf = util.parsepatchoutput(line)
1378 pf = util.parsepatchoutput(line)
1383 printed_file = False
1379 printed_file = False
1384 files.add(pf)
1380 files.add(pf)
1385 elif line.find('with fuzz') >= 0:
1381 elif line.find('with fuzz') >= 0:
1386 fuzz = True
1382 fuzz = True
1387 if not printed_file:
1383 if not printed_file:
1388 ui.warn(pf + '\n')
1384 ui.warn(pf + '\n')
1389 printed_file = True
1385 printed_file = True
1390 ui.warn(line + '\n')
1386 ui.warn(line + '\n')
1391 elif line.find('saving rejects to file') >= 0:
1387 elif line.find('saving rejects to file') >= 0:
1392 ui.warn(line + '\n')
1388 ui.warn(line + '\n')
1393 elif line.find('FAILED') >= 0:
1389 elif line.find('FAILED') >= 0:
1394 if not printed_file:
1390 if not printed_file:
1395 ui.warn(pf + '\n')
1391 ui.warn(pf + '\n')
1396 printed_file = True
1392 printed_file = True
1397 ui.warn(line + '\n')
1393 ui.warn(line + '\n')
1398 finally:
1394 finally:
1399 if files:
1395 if files:
1400 cfiles = list(files)
1396 scmutil.marktouched(repo, files, similarity)
1401 cwd = repo.getcwd()
1402 if cwd:
1403 cfiles = [util.pathto(repo.root, cwd, f)
1404 for f in cfiles]
1405 scmutil.addremove(repo, cfiles, similarity=similarity)
1406 code = fp.close()
1397 code = fp.close()
1407 if code:
1398 if code:
1408 raise PatchError(_("patch command failed: %s") %
1399 raise PatchError(_("patch command failed: %s") %
1409 util.explainexit(code)[0])
1400 util.explainexit(code)[0])
1410 return fuzz
1401 return fuzz
1411
1402
1412 def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
1403 def patchbackend(ui, backend, patchobj, strip, files=None, eolmode='strict'):
1413 if files is None:
1404 if files is None:
1414 files = set()
1405 files = set()
1415 if eolmode is None:
1406 if eolmode is None:
1416 eolmode = ui.config('patch', 'eol', 'strict')
1407 eolmode = ui.config('patch', 'eol', 'strict')
1417 if eolmode.lower() not in eolmodes:
1408 if eolmode.lower() not in eolmodes:
1418 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1409 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1419 eolmode = eolmode.lower()
1410 eolmode = eolmode.lower()
1420
1411
1421 store = filestore()
1412 store = filestore()
1422 try:
1413 try:
1423 fp = open(patchobj, 'rb')
1414 fp = open(patchobj, 'rb')
1424 except TypeError:
1415 except TypeError:
1425 fp = patchobj
1416 fp = patchobj
1426 try:
1417 try:
1427 ret = applydiff(ui, fp, backend, store, strip=strip,
1418 ret = applydiff(ui, fp, backend, store, strip=strip,
1428 eolmode=eolmode)
1419 eolmode=eolmode)
1429 finally:
1420 finally:
1430 if fp != patchobj:
1421 if fp != patchobj:
1431 fp.close()
1422 fp.close()
1432 files.update(backend.close())
1423 files.update(backend.close())
1433 store.close()
1424 store.close()
1434 if ret < 0:
1425 if ret < 0:
1435 raise PatchError(_('patch failed to apply'))
1426 raise PatchError(_('patch failed to apply'))
1436 return ret > 0
1427 return ret > 0
1437
1428
1438 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1429 def internalpatch(ui, repo, patchobj, strip, files=None, eolmode='strict',
1439 similarity=0):
1430 similarity=0):
1440 """use builtin patch to apply <patchobj> to the working directory.
1431 """use builtin patch to apply <patchobj> to the working directory.
1441 returns whether patch was applied with fuzz factor."""
1432 returns whether patch was applied with fuzz factor."""
1442 backend = workingbackend(ui, repo, similarity)
1433 backend = workingbackend(ui, repo, similarity)
1443 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1434 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1444
1435
1445 def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
1436 def patchrepo(ui, repo, ctx, store, patchobj, strip, files=None,
1446 eolmode='strict'):
1437 eolmode='strict'):
1447 backend = repobackend(ui, repo, ctx, store)
1438 backend = repobackend(ui, repo, ctx, store)
1448 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1439 return patchbackend(ui, backend, patchobj, strip, files, eolmode)
1449
1440
1450 def makememctx(repo, parents, text, user, date, branch, files, store,
1441 def makememctx(repo, parents, text, user, date, branch, files, store,
1451 editor=None):
1442 editor=None):
1452 def getfilectx(repo, memctx, path):
1443 def getfilectx(repo, memctx, path):
1453 data, (islink, isexec), copied = store.getfile(path)
1444 data, (islink, isexec), copied = store.getfile(path)
1454 return context.memfilectx(path, data, islink=islink, isexec=isexec,
1445 return context.memfilectx(path, data, islink=islink, isexec=isexec,
1455 copied=copied)
1446 copied=copied)
1456 extra = {}
1447 extra = {}
1457 if branch:
1448 if branch:
1458 extra['branch'] = encoding.fromlocal(branch)
1449 extra['branch'] = encoding.fromlocal(branch)
1459 ctx = context.memctx(repo, parents, text, files, getfilectx, user,
1450 ctx = context.memctx(repo, parents, text, files, getfilectx, user,
1460 date, extra)
1451 date, extra)
1461 if editor:
1452 if editor:
1462 ctx._text = editor(repo, ctx, [])
1453 ctx._text = editor(repo, ctx, [])
1463 return ctx
1454 return ctx
1464
1455
1465 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1456 def patch(ui, repo, patchname, strip=1, files=None, eolmode='strict',
1466 similarity=0):
1457 similarity=0):
1467 """Apply <patchname> to the working directory.
1458 """Apply <patchname> to the working directory.
1468
1459
1469 'eolmode' specifies how end of lines should be handled. It can be:
1460 'eolmode' specifies how end of lines should be handled. It can be:
1470 - 'strict': inputs are read in binary mode, EOLs are preserved
1461 - 'strict': inputs are read in binary mode, EOLs are preserved
1471 - 'crlf': EOLs are ignored when patching and reset to CRLF
1462 - 'crlf': EOLs are ignored when patching and reset to CRLF
1472 - 'lf': EOLs are ignored when patching and reset to LF
1463 - 'lf': EOLs are ignored when patching and reset to LF
1473 - None: get it from user settings, default to 'strict'
1464 - None: get it from user settings, default to 'strict'
1474 'eolmode' is ignored when using an external patcher program.
1465 'eolmode' is ignored when using an external patcher program.
1475
1466
1476 Returns whether patch was applied with fuzz factor.
1467 Returns whether patch was applied with fuzz factor.
1477 """
1468 """
1478 patcher = ui.config('ui', 'patch')
1469 patcher = ui.config('ui', 'patch')
1479 if files is None:
1470 if files is None:
1480 files = set()
1471 files = set()
1481 try:
1472 try:
1482 if patcher:
1473 if patcher:
1483 return _externalpatch(ui, repo, patcher, patchname, strip,
1474 return _externalpatch(ui, repo, patcher, patchname, strip,
1484 files, similarity)
1475 files, similarity)
1485 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1476 return internalpatch(ui, repo, patchname, strip, files, eolmode,
1486 similarity)
1477 similarity)
1487 except PatchError, err:
1478 except PatchError, err:
1488 raise util.Abort(str(err))
1479 raise util.Abort(str(err))
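A minimal usage sketch of patch() above (not part of the changeset; 'ui' and 'repo' are assumed to be an existing ui object and local repository, and 'fix-typo.diff' is a hypothetical file):

# Illustration only: apply a patch file to the working directory and report
# which files it touched.
from mercurial import patch

files = set()
fuzz = patch.patch(ui, repo, 'fix-typo.diff', strip=1, files=files,
                   eolmode='strict')
if fuzz:
    ui.warn('patch applied with fuzz\n')
ui.status('touched: %s\n' % ', '.join(sorted(files)))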
1489
1480
1490 def changedfiles(ui, repo, patchpath, strip=1):
1481 def changedfiles(ui, repo, patchpath, strip=1):
1491 backend = fsbackend(ui, repo.root)
1482 backend = fsbackend(ui, repo.root)
1492 fp = open(patchpath, 'rb')
1483 fp = open(patchpath, 'rb')
1493 try:
1484 try:
1494 changed = set()
1485 changed = set()
1495 for state, values in iterhunks(fp):
1486 for state, values in iterhunks(fp):
1496 if state == 'file':
1487 if state == 'file':
1497 afile, bfile, first_hunk, gp = values
1488 afile, bfile, first_hunk, gp = values
1498 if gp:
1489 if gp:
1499 gp.path = pathstrip(gp.path, strip - 1)[1]
1490 gp.path = pathstrip(gp.path, strip - 1)[1]
1500 if gp.oldpath:
1491 if gp.oldpath:
1501 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1492 gp.oldpath = pathstrip(gp.oldpath, strip - 1)[1]
1502 else:
1493 else:
1503 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1494 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip)
1504 changed.add(gp.path)
1495 changed.add(gp.path)
1505 if gp.op == 'RENAME':
1496 if gp.op == 'RENAME':
1506 changed.add(gp.oldpath)
1497 changed.add(gp.oldpath)
1507 elif state not in ('hunk', 'git'):
1498 elif state not in ('hunk', 'git'):
1508 raise util.Abort(_('unsupported parser state: %s') % state)
1499 raise util.Abort(_('unsupported parser state: %s') % state)
1509 return changed
1500 return changed
1510 finally:
1501 finally:
1511 fp.close()
1502 fp.close()
1512
1503
1513 class GitDiffRequired(Exception):
1504 class GitDiffRequired(Exception):
1514 pass
1505 pass
1515
1506
1516 def diffopts(ui, opts=None, untrusted=False, section='diff'):
1507 def diffopts(ui, opts=None, untrusted=False, section='diff'):
1517 def get(key, name=None, getter=ui.configbool):
1508 def get(key, name=None, getter=ui.configbool):
1518 return ((opts and opts.get(key)) or
1509 return ((opts and opts.get(key)) or
1519 getter(section, name or key, None, untrusted=untrusted))
1510 getter(section, name or key, None, untrusted=untrusted))
1520 return mdiff.diffopts(
1511 return mdiff.diffopts(
1521 text=opts and opts.get('text'),
1512 text=opts and opts.get('text'),
1522 git=get('git'),
1513 git=get('git'),
1523 nodates=get('nodates'),
1514 nodates=get('nodates'),
1524 showfunc=get('show_function', 'showfunc'),
1515 showfunc=get('show_function', 'showfunc'),
1525 ignorews=get('ignore_all_space', 'ignorews'),
1516 ignorews=get('ignore_all_space', 'ignorews'),
1526 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1517 ignorewsamount=get('ignore_space_change', 'ignorewsamount'),
1527 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1518 ignoreblanklines=get('ignore_blank_lines', 'ignoreblanklines'),
1528 context=get('unified', getter=ui.config))
1519 context=get('unified', getter=ui.config))
1529
1520
1530 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1521 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
1531 losedatafn=None, prefix=''):
1522 losedatafn=None, prefix=''):
1532 '''yields diff of changes to files between two nodes, or node and
1523 '''yields diff of changes to files between two nodes, or node and
1533 working directory.
1524 working directory.
1534
1525
1535 if node1 is None, use first dirstate parent instead.
1526 if node1 is None, use first dirstate parent instead.
1536 if node2 is None, compare node1 with working directory.
1527 if node2 is None, compare node1 with working directory.
1537
1528
1538 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1529 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
1539 every time some change cannot be represented with the current
1530 every time some change cannot be represented with the current
1540 patch format. Return False to upgrade to git patch format, True to
1531 patch format. Return False to upgrade to git patch format, True to
1541 accept the loss or raise an exception to abort the diff. It is
1532 accept the loss or raise an exception to abort the diff. It is
1542 called with the name of current file being diffed as 'fn'. If set
1533 called with the name of current file being diffed as 'fn'. If set
1543 to None, patches will always be upgraded to git format when
1534 to None, patches will always be upgraded to git format when
1544 necessary.
1535 necessary.
1545
1536
1546 prefix is a filename prefix that is prepended to all filenames on
1537 prefix is a filename prefix that is prepended to all filenames on
1547 display (used for subrepos).
1538 display (used for subrepos).
1548 '''
1539 '''
1549
1540
1550 if opts is None:
1541 if opts is None:
1551 opts = mdiff.defaultopts
1542 opts = mdiff.defaultopts
1552
1543
1553 if not node1 and not node2:
1544 if not node1 and not node2:
1554 node1 = repo.dirstate.p1()
1545 node1 = repo.dirstate.p1()
1555
1546
1556 def lrugetfilectx():
1547 def lrugetfilectx():
1557 cache = {}
1548 cache = {}
1558 order = util.deque()
1549 order = util.deque()
1559 def getfilectx(f, ctx):
1550 def getfilectx(f, ctx):
1560 fctx = ctx.filectx(f, filelog=cache.get(f))
1551 fctx = ctx.filectx(f, filelog=cache.get(f))
1561 if f not in cache:
1552 if f not in cache:
1562 if len(cache) > 20:
1553 if len(cache) > 20:
1563 del cache[order.popleft()]
1554 del cache[order.popleft()]
1564 cache[f] = fctx.filelog()
1555 cache[f] = fctx.filelog()
1565 else:
1556 else:
1566 order.remove(f)
1557 order.remove(f)
1567 order.append(f)
1558 order.append(f)
1568 return fctx
1559 return fctx
1569 return getfilectx
1560 return getfilectx
1570 getfilectx = lrugetfilectx()
1561 getfilectx = lrugetfilectx()
1571
1562
1572 ctx1 = repo[node1]
1563 ctx1 = repo[node1]
1573 ctx2 = repo[node2]
1564 ctx2 = repo[node2]
1574
1565
1575 if not changes:
1566 if not changes:
1576 changes = repo.status(ctx1, ctx2, match=match)
1567 changes = repo.status(ctx1, ctx2, match=match)
1577 modified, added, removed = changes[:3]
1568 modified, added, removed = changes[:3]
1578
1569
1579 if not modified and not added and not removed:
1570 if not modified and not added and not removed:
1580 return []
1571 return []
1581
1572
1582 revs = None
1573 revs = None
1583 hexfunc = repo.ui.debugflag and hex or short
1574 hexfunc = repo.ui.debugflag and hex or short
1584 revs = [hexfunc(node) for node in [node1, node2] if node]
1575 revs = [hexfunc(node) for node in [node1, node2] if node]
1585
1576
1586 copy = {}
1577 copy = {}
1587 if opts.git or opts.upgrade:
1578 if opts.git or opts.upgrade:
1588 copy = copies.pathcopies(ctx1, ctx2)
1579 copy = copies.pathcopies(ctx1, ctx2)
1589
1580
1590 def difffn(opts, losedata):
1581 def difffn(opts, losedata):
1591 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1582 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1592 copy, getfilectx, opts, losedata, prefix)
1583 copy, getfilectx, opts, losedata, prefix)
1593 if opts.upgrade and not opts.git:
1584 if opts.upgrade and not opts.git:
1594 try:
1585 try:
1595 def losedata(fn):
1586 def losedata(fn):
1596 if not losedatafn or not losedatafn(fn=fn):
1587 if not losedatafn or not losedatafn(fn=fn):
1597 raise GitDiffRequired
1588 raise GitDiffRequired
1598 # Buffer the whole output until we are sure it can be generated
1589 # Buffer the whole output until we are sure it can be generated
1599 return list(difffn(opts.copy(git=False), losedata))
1590 return list(difffn(opts.copy(git=False), losedata))
1600 except GitDiffRequired:
1591 except GitDiffRequired:
1601 return difffn(opts.copy(git=True), None)
1592 return difffn(opts.copy(git=True), None)
1602 else:
1593 else:
1603 return difffn(opts, None)
1594 return difffn(opts, None)
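A short sketch (not part of the changeset; 'ui' and 'repo' assumed to exist) of rendering a git-style diff of the working directory against its first parent with diff() and diffopts() above:

# Illustration only: node1/node2 default to the first dirstate parent vs.
# the working directory, as documented above.
from mercurial import patch

opts = patch.diffopts(ui, {'git': True})
for chunk in patch.diff(repo, opts=opts):
    ui.write(chunk)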
1604
1595
1605 def difflabel(func, *args, **kw):
1596 def difflabel(func, *args, **kw):
1606 '''yields 2-tuples of (output, label) based on the output of func()'''
1597 '''yields 2-tuples of (output, label) based on the output of func()'''
1607 headprefixes = [('diff', 'diff.diffline'),
1598 headprefixes = [('diff', 'diff.diffline'),
1608 ('copy', 'diff.extended'),
1599 ('copy', 'diff.extended'),
1609 ('rename', 'diff.extended'),
1600 ('rename', 'diff.extended'),
1610 ('old', 'diff.extended'),
1601 ('old', 'diff.extended'),
1611 ('new', 'diff.extended'),
1602 ('new', 'diff.extended'),
1612 ('deleted', 'diff.extended'),
1603 ('deleted', 'diff.extended'),
1613 ('---', 'diff.file_a'),
1604 ('---', 'diff.file_a'),
1614 ('+++', 'diff.file_b')]
1605 ('+++', 'diff.file_b')]
1615 textprefixes = [('@', 'diff.hunk'),
1606 textprefixes = [('@', 'diff.hunk'),
1616 ('-', 'diff.deleted'),
1607 ('-', 'diff.deleted'),
1617 ('+', 'diff.inserted')]
1608 ('+', 'diff.inserted')]
1618 head = False
1609 head = False
1619 for chunk in func(*args, **kw):
1610 for chunk in func(*args, **kw):
1620 lines = chunk.split('\n')
1611 lines = chunk.split('\n')
1621 for i, line in enumerate(lines):
1612 for i, line in enumerate(lines):
1622 if i != 0:
1613 if i != 0:
1623 yield ('\n', '')
1614 yield ('\n', '')
1624 if head:
1615 if head:
1625 if line.startswith('@'):
1616 if line.startswith('@'):
1626 head = False
1617 head = False
1627 else:
1618 else:
1628 if line and line[0] not in ' +-@\\':
1619 if line and line[0] not in ' +-@\\':
1629 head = True
1620 head = True
1630 stripline = line
1621 stripline = line
1631 if not head and line and line[0] in '+-':
1622 if not head and line and line[0] in '+-':
1632 # highlight trailing whitespace, but only in changed lines
1623 # highlight trailing whitespace, but only in changed lines
1633 stripline = line.rstrip()
1624 stripline = line.rstrip()
1634 prefixes = textprefixes
1625 prefixes = textprefixes
1635 if head:
1626 if head:
1636 prefixes = headprefixes
1627 prefixes = headprefixes
1637 for prefix, label in prefixes:
1628 for prefix, label in prefixes:
1638 if stripline.startswith(prefix):
1629 if stripline.startswith(prefix):
1639 yield (stripline, label)
1630 yield (stripline, label)
1640 break
1631 break
1641 else:
1632 else:
1642 yield (line, '')
1633 yield (line, '')
1643 if line != stripline:
1634 if line != stripline:
1644 yield (line[len(stripline):], 'diff.trailingwhitespace')
1635 yield (line[len(stripline):], 'diff.trailingwhitespace')
1645
1636
1646 def diffui(*args, **kw):
1637 def diffui(*args, **kw):
1647 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1638 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
1648 return difflabel(diff, *args, **kw)
1639 return difflabel(diff, *args, **kw)
1649
1640
1650 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1641 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
1651 copy, getfilectx, opts, losedatafn, prefix):
1642 copy, getfilectx, opts, losedatafn, prefix):
1652
1643
1653 def join(f):
1644 def join(f):
1654 return posixpath.join(prefix, f)
1645 return posixpath.join(prefix, f)
1655
1646
1656 def addmodehdr(header, omode, nmode):
1647 def addmodehdr(header, omode, nmode):
1657 if omode != nmode:
1648 if omode != nmode:
1658 header.append('old mode %s\n' % omode)
1649 header.append('old mode %s\n' % omode)
1659 header.append('new mode %s\n' % nmode)
1650 header.append('new mode %s\n' % nmode)
1660
1651
1661 def addindexmeta(meta, revs):
1652 def addindexmeta(meta, revs):
1662 if opts.git:
1653 if opts.git:
1663 i = len(revs)
1654 i = len(revs)
1664 if i==2:
1655 if i==2:
1665 meta.append('index %s..%s\n' % tuple(revs))
1656 meta.append('index %s..%s\n' % tuple(revs))
1666 elif i==3:
1657 elif i==3:
1667 meta.append('index %s,%s..%s\n' % tuple(revs))
1658 meta.append('index %s,%s..%s\n' % tuple(revs))
1668
1659
1669 def gitindex(text):
1660 def gitindex(text):
1670 if not text:
1661 if not text:
1671 return hex(nullid)
1662 return hex(nullid)
1672 l = len(text)
1663 l = len(text)
1673 s = util.sha1('blob %d\0' % l)
1664 s = util.sha1('blob %d\0' % l)
1674 s.update(text)
1665 s.update(text)
1675 return s.hexdigest()
1666 return s.hexdigest()
1676
1667
1677 def diffline(a, b, revs):
1668 def diffline(a, b, revs):
1678 if opts.git:
1669 if opts.git:
1679 line = 'diff --git a/%s b/%s\n' % (a, b)
1670 line = 'diff --git a/%s b/%s\n' % (a, b)
1680 elif not repo.ui.quiet:
1671 elif not repo.ui.quiet:
1681 if revs:
1672 if revs:
1682 revinfo = ' '.join(["-r %s" % rev for rev in revs])
1673 revinfo = ' '.join(["-r %s" % rev for rev in revs])
1683 line = 'diff %s %s\n' % (revinfo, a)
1674 line = 'diff %s %s\n' % (revinfo, a)
1684 else:
1675 else:
1685 line = 'diff %s\n' % a
1676 line = 'diff %s\n' % a
1686 else:
1677 else:
1687 line = ''
1678 line = ''
1688 return line
1679 return line
1689
1680
1690 date1 = util.datestr(ctx1.date())
1681 date1 = util.datestr(ctx1.date())
1691 man1 = ctx1.manifest()
1682 man1 = ctx1.manifest()
1692
1683
1693 gone = set()
1684 gone = set()
1694 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1685 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
1695
1686
1696 copyto = dict([(v, k) for k, v in copy.items()])
1687 copyto = dict([(v, k) for k, v in copy.items()])
1697
1688
1698 if opts.git:
1689 if opts.git:
1699 revs = None
1690 revs = None
1700
1691
1701 for f in sorted(modified + added + removed):
1692 for f in sorted(modified + added + removed):
1702 to = None
1693 to = None
1703 tn = None
1694 tn = None
1704 dodiff = True
1695 dodiff = True
1705 header = []
1696 header = []
1706 if f in man1:
1697 if f in man1:
1707 to = getfilectx(f, ctx1).data()
1698 to = getfilectx(f, ctx1).data()
1708 if f not in removed:
1699 if f not in removed:
1709 tn = getfilectx(f, ctx2).data()
1700 tn = getfilectx(f, ctx2).data()
1710 a, b = f, f
1701 a, b = f, f
1711 if opts.git or losedatafn:
1702 if opts.git or losedatafn:
1712 if f in added:
1703 if f in added:
1713 mode = gitmode[ctx2.flags(f)]
1704 mode = gitmode[ctx2.flags(f)]
1714 if f in copy or f in copyto:
1705 if f in copy or f in copyto:
1715 if opts.git:
1706 if opts.git:
1716 if f in copy:
1707 if f in copy:
1717 a = copy[f]
1708 a = copy[f]
1718 else:
1709 else:
1719 a = copyto[f]
1710 a = copyto[f]
1720 omode = gitmode[man1.flags(a)]
1711 omode = gitmode[man1.flags(a)]
1721 addmodehdr(header, omode, mode)
1712 addmodehdr(header, omode, mode)
1722 if a in removed and a not in gone:
1713 if a in removed and a not in gone:
1723 op = 'rename'
1714 op = 'rename'
1724 gone.add(a)
1715 gone.add(a)
1725 else:
1716 else:
1726 op = 'copy'
1717 op = 'copy'
1727 header.append('%s from %s\n' % (op, join(a)))
1718 header.append('%s from %s\n' % (op, join(a)))
1728 header.append('%s to %s\n' % (op, join(f)))
1719 header.append('%s to %s\n' % (op, join(f)))
1729 to = getfilectx(a, ctx1).data()
1720 to = getfilectx(a, ctx1).data()
1730 else:
1721 else:
1731 losedatafn(f)
1722 losedatafn(f)
1732 else:
1723 else:
1733 if opts.git:
1724 if opts.git:
1734 header.append('new file mode %s\n' % mode)
1725 header.append('new file mode %s\n' % mode)
1735 elif ctx2.flags(f):
1726 elif ctx2.flags(f):
1736 losedatafn(f)
1727 losedatafn(f)
1737 # In theory, if tn was copied or renamed we should check
1728 # In theory, if tn was copied or renamed we should check
1738 # if the source is binary too but the copy record already
1729 # if the source is binary too but the copy record already
1739 # forces git mode.
1730 # forces git mode.
1740 if util.binary(tn):
1731 if util.binary(tn):
1741 if opts.git:
1732 if opts.git:
1742 dodiff = 'binary'
1733 dodiff = 'binary'
1743 else:
1734 else:
1744 losedatafn(f)
1735 losedatafn(f)
1745 if not opts.git and not tn:
1736 if not opts.git and not tn:
1746 # regular diffs cannot represent new empty file
1737 # regular diffs cannot represent new empty file
1747 losedatafn(f)
1738 losedatafn(f)
1748 elif f in removed:
1739 elif f in removed:
1749 if opts.git:
1740 if opts.git:
1750 # have we already reported a copy above?
1741 # have we already reported a copy above?
1751 if ((f in copy and copy[f] in added
1742 if ((f in copy and copy[f] in added
1752 and copyto[copy[f]] == f) or
1743 and copyto[copy[f]] == f) or
1753 (f in copyto and copyto[f] in added
1744 (f in copyto and copyto[f] in added
1754 and copy[copyto[f]] == f)):
1745 and copy[copyto[f]] == f)):
1755 dodiff = False
1746 dodiff = False
1756 else:
1747 else:
1757 header.append('deleted file mode %s\n' %
1748 header.append('deleted file mode %s\n' %
1758 gitmode[man1.flags(f)])
1749 gitmode[man1.flags(f)])
1759 if util.binary(to):
1750 if util.binary(to):
1760 dodiff = 'binary'
1751 dodiff = 'binary'
1761 elif not to or util.binary(to):
1752 elif not to or util.binary(to):
1762 # regular diffs cannot represent empty file deletion
1753 # regular diffs cannot represent empty file deletion
1763 losedatafn(f)
1754 losedatafn(f)
1764 else:
1755 else:
1765 oflag = man1.flags(f)
1756 oflag = man1.flags(f)
1766 nflag = ctx2.flags(f)
1757 nflag = ctx2.flags(f)
1767 binary = util.binary(to) or util.binary(tn)
1758 binary = util.binary(to) or util.binary(tn)
1768 if opts.git:
1759 if opts.git:
1769 addmodehdr(header, gitmode[oflag], gitmode[nflag])
1760 addmodehdr(header, gitmode[oflag], gitmode[nflag])
1770 if binary:
1761 if binary:
1771 dodiff = 'binary'
1762 dodiff = 'binary'
1772 elif binary or nflag != oflag:
1763 elif binary or nflag != oflag:
1773 losedatafn(f)
1764 losedatafn(f)
1774
1765
1775 if dodiff:
1766 if dodiff:
1776 if opts.git or revs:
1767 if opts.git or revs:
1777 header.insert(0, diffline(join(a), join(b), revs))
1768 header.insert(0, diffline(join(a), join(b), revs))
1778 if dodiff == 'binary':
1769 if dodiff == 'binary':
1779 text = mdiff.b85diff(to, tn)
1770 text = mdiff.b85diff(to, tn)
1780 if text:
1771 if text:
1781 addindexmeta(header, [gitindex(to), gitindex(tn)])
1772 addindexmeta(header, [gitindex(to), gitindex(tn)])
1782 else:
1773 else:
1783 text = mdiff.unidiff(to, date1,
1774 text = mdiff.unidiff(to, date1,
1784 # ctx2 date may be dynamic
1775 # ctx2 date may be dynamic
1785 tn, util.datestr(ctx2.date()),
1776 tn, util.datestr(ctx2.date()),
1786 join(a), join(b), opts=opts)
1777 join(a), join(b), opts=opts)
1787 if header and (text or len(header) > 1):
1778 if header and (text or len(header) > 1):
1788 yield ''.join(header)
1779 yield ''.join(header)
1789 if text:
1780 if text:
1790 yield text
1781 yield text
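As an aside (not part of the changeset): the gitindex() helper inside trydiff() hashes file content the same way git hashes a blob object ('blob <size>\0<content>' fed through SHA-1), which is why the 'index' lines it emits match git's own object ids. A standalone equivalent:

# Illustration only: how gitindex() above computes a blob id (the module
# special-cases empty content to the null id instead).
import hashlib

def gitblobid(text):
    return hashlib.sha1('blob %d\0' % len(text) + text).hexdigest()

assert gitblobid('') == 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391'  # git's empty blob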
1791
1782
1792 def diffstatsum(stats):
1783 def diffstatsum(stats):
1793 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1784 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
1794 for f, a, r, b in stats:
1785 for f, a, r, b in stats:
1795 maxfile = max(maxfile, encoding.colwidth(f))
1786 maxfile = max(maxfile, encoding.colwidth(f))
1796 maxtotal = max(maxtotal, a + r)
1787 maxtotal = max(maxtotal, a + r)
1797 addtotal += a
1788 addtotal += a
1798 removetotal += r
1789 removetotal += r
1799 binary = binary or b
1790 binary = binary or b
1800
1791
1801 return maxfile, maxtotal, addtotal, removetotal, binary
1792 return maxfile, maxtotal, addtotal, removetotal, binary
1802
1793
1803 def diffstatdata(lines):
1794 def diffstatdata(lines):
1804 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1795 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
1805
1796
1806 results = []
1797 results = []
1807 filename, adds, removes, isbinary = None, 0, 0, False
1798 filename, adds, removes, isbinary = None, 0, 0, False
1808
1799
1809 def addresult():
1800 def addresult():
1810 if filename:
1801 if filename:
1811 results.append((filename, adds, removes, isbinary))
1802 results.append((filename, adds, removes, isbinary))
1812
1803
1813 for line in lines:
1804 for line in lines:
1814 if line.startswith('diff'):
1805 if line.startswith('diff'):
1815 addresult()
1806 addresult()
1816 # set numbers to 0 anyway when starting new file
1807 # set numbers to 0 anyway when starting new file
1817 adds, removes, isbinary = 0, 0, False
1808 adds, removes, isbinary = 0, 0, False
1818 if line.startswith('diff --git a/'):
1809 if line.startswith('diff --git a/'):
1819 filename = gitre.search(line).group(1)
1810 filename = gitre.search(line).group(1)
1820 elif line.startswith('diff -r'):
1811 elif line.startswith('diff -r'):
1821 # format: "diff -r ... -r ... filename"
1812 # format: "diff -r ... -r ... filename"
1822 filename = diffre.search(line).group(1)
1813 filename = diffre.search(line).group(1)
1823 elif line.startswith('+') and not line.startswith('+++ '):
1814 elif line.startswith('+') and not line.startswith('+++ '):
1824 adds += 1
1815 adds += 1
1825 elif line.startswith('-') and not line.startswith('--- '):
1816 elif line.startswith('-') and not line.startswith('--- '):
1826 removes += 1
1817 removes += 1
1827 elif (line.startswith('GIT binary patch') or
1818 elif (line.startswith('GIT binary patch') or
1828 line.startswith('Binary file')):
1819 line.startswith('Binary file')):
1829 isbinary = True
1820 isbinary = True
1830 addresult()
1821 addresult()
1831 return results
1822 return results
1832
1823
1833 def diffstat(lines, width=80, git=False):
1824 def diffstat(lines, width=80, git=False):
1834 output = []
1825 output = []
1835 stats = diffstatdata(lines)
1826 stats = diffstatdata(lines)
1836 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1827 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
1837
1828
1838 countwidth = len(str(maxtotal))
1829 countwidth = len(str(maxtotal))
1839 if hasbinary and countwidth < 3:
1830 if hasbinary and countwidth < 3:
1840 countwidth = 3
1831 countwidth = 3
1841 graphwidth = width - countwidth - maxname - 6
1832 graphwidth = width - countwidth - maxname - 6
1842 if graphwidth < 10:
1833 if graphwidth < 10:
1843 graphwidth = 10
1834 graphwidth = 10
1844
1835
1845 def scale(i):
1836 def scale(i):
1846 if maxtotal <= graphwidth:
1837 if maxtotal <= graphwidth:
1847 return i
1838 return i
1848 # If diffstat runs out of room it doesn't print anything,
1839 # If diffstat runs out of room it doesn't print anything,
1849 # which isn't very useful, so always print at least one + or -
1840 # which isn't very useful, so always print at least one + or -
1850 # if there were at least some changes.
1841 # if there were at least some changes.
1851 return max(i * graphwidth // maxtotal, int(bool(i)))
1842 return max(i * graphwidth // maxtotal, int(bool(i)))
1852
1843
1853 for filename, adds, removes, isbinary in stats:
1844 for filename, adds, removes, isbinary in stats:
1854 if isbinary:
1845 if isbinary:
1855 count = 'Bin'
1846 count = 'Bin'
1856 else:
1847 else:
1857 count = adds + removes
1848 count = adds + removes
1858 pluses = '+' * scale(adds)
1849 pluses = '+' * scale(adds)
1859 minuses = '-' * scale(removes)
1850 minuses = '-' * scale(removes)
1860 output.append(' %s%s | %*s %s%s\n' %
1851 output.append(' %s%s | %*s %s%s\n' %
1861 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1852 (filename, ' ' * (maxname - encoding.colwidth(filename)),
1862 countwidth, count, pluses, minuses))
1853 countwidth, count, pluses, minuses))
1863
1854
1864 if stats:
1855 if stats:
1865 output.append(_(' %d files changed, %d insertions(+), '
1856 output.append(_(' %d files changed, %d insertions(+), '
1866 '%d deletions(-)\n')
1857 '%d deletions(-)\n')
1867 % (len(stats), totaladds, totalremoves))
1858 % (len(stats), totaladds, totalremoves))
1868
1859
1869 return ''.join(output)
1860 return ''.join(output)
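A short sketch (not part of the changeset; 'ui' and 'repo' assumed to exist) chaining diff() output into diffstat() above for the usual histogram summary:

# Illustration only: diffstat() consumes the rendered diff line by line.
from mercurial import patch

lines = ''.join(patch.diff(repo, opts=patch.diffopts(ui))).splitlines(True)
ui.write(patch.diffstat(lines, width=80))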
1870
1861
1871 def diffstatui(*args, **kw):
1862 def diffstatui(*args, **kw):
1872 '''like diffstat(), but yields 2-tuples of (output, label) for
1863 '''like diffstat(), but yields 2-tuples of (output, label) for
1873 ui.write()
1864 ui.write()
1874 '''
1865 '''
1875
1866
1876 for line in diffstat(*args, **kw).splitlines():
1867 for line in diffstat(*args, **kw).splitlines():
1877 if line and line[-1] in '+-':
1868 if line and line[-1] in '+-':
1878 name, graph = line.rsplit(' ', 1)
1869 name, graph = line.rsplit(' ', 1)
1879 yield (name + ' ', '')
1870 yield (name + ' ', '')
1880 m = re.search(r'\++', graph)
1871 m = re.search(r'\++', graph)
1881 if m:
1872 if m:
1882 yield (m.group(0), 'diffstat.inserted')
1873 yield (m.group(0), 'diffstat.inserted')
1883 m = re.search(r'-+', graph)
1874 m = re.search(r'-+', graph)
1884 if m:
1875 if m:
1885 yield (m.group(0), 'diffstat.deleted')
1876 yield (m.group(0), 'diffstat.deleted')
1886 else:
1877 else:
1887 yield (line, '')
1878 yield (line, '')
1888 yield ('\n', '')
1879 yield ('\n', '')