##// END OF EJS Templates
py3: use email.parser module to parse email messages...
Pulkit Goyal -
r35651:a981ab2a default
parent child Browse files
Show More
@@ -1,352 +1,352 b''
1 # gnuarch.py - GNU Arch support for the convert extension
1 # gnuarch.py - GNU Arch support for the convert extension
2 #
2 #
3 # Copyright 2008, 2009 Aleix Conchillo Flaque <aleix@member.fsf.org>
3 # Copyright 2008, 2009 Aleix Conchillo Flaque <aleix@member.fsf.org>
4 # and others
4 # and others
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import email
10 import email.parser as emailparser
11 import os
11 import os
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import tempfile
14 import tempfile
15
15
16 from mercurial.i18n import _
16 from mercurial.i18n import _
17 from mercurial import (
17 from mercurial import (
18 encoding,
18 encoding,
19 error,
19 error,
20 util,
20 util,
21 )
21 )
22 from . import common
22 from . import common
23
23
class gnuarch_source(common.converter_source, common.commandline):
    """Convert source for GNU Arch repositories.

    Drives an external GNU Arch client -- ``baz`` if available, otherwise
    ``tla`` -- to enumerate revisions, replay them into a temporary working
    tree, and report per-revision file changes to the convert machinery.
    """

    class gnuarch_rev(object):
        # Plain record describing one Arch revision as parsed from
        # cat-log and changeset output.
        def __init__(self, rev):
            self.rev = rev
            self.summary = ''          # commit message (Summary header + body)
            self.date = None           # util.datestr() formatted commit date
            self.author = ''           # recoded Creator header
            # Revision this one continues (branch/tag origin), or None.
            self.continuationof = None
            self.add_files = []
            self.mod_files = []
            self.del_files = []
            self.ren_files = {}        # file renames: source -> destination
            self.ren_dirs = {}         # directory renames: source -> destination

    def __init__(self, ui, repotype, path, revs=None):
        """Validate that *path* is a GNU Arch tree and locate a client tool.

        Raises common.NoRepo when the '{arch}' control directory is absent,
        and error.Abort when neither 'baz' nor 'tla' is on PATH.
        """
        super(gnuarch_source, self).__init__(ui, repotype, path, revs=revs)

        if not os.path.exists(os.path.join(path, '{arch}')):
            raise common.NoRepo(_("%s does not look like a GNU Arch repository")
                                % path)

        # Could use checktool, but we want to check for baz or tla.
        self.execmd = None
        if util.findexe('baz'):
            self.execmd = 'baz'
        else:
            if util.findexe('tla'):
                self.execmd = 'tla'
            else:
                raise error.Abort(_('cannot find a GNU Arch tool'))

        common.commandline.__init__(self, ui, self.execmd)

        self.path = os.path.realpath(path)
        self.tmppath = None            # scratch working tree, set in before()

        self.treeversion = None
        self.lastrev = None            # last revision applied by _update()
        self.changes = {}              # rev -> gnuarch_rev
        self.parents = {}              # rev -> [parent revs]; None key = heads
        self.tags = {}
        # email.parser handles the RFC 2822-style cat-log output.
        self.catlogparser = emailparser.Parser()
        self.encoding = encoding.encoding
        self.archives = []

    def before(self):
        """Scan the tree's history and build the changes/parents maps."""
        # Get registered archives
        self.archives = [i.rstrip('\n')
                         for i in self.runlines0('archives', '-n')]

        # tla and baz take the tree path differently.
        if self.execmd == 'tla':
            output = self.run0('tree-version', self.path)
        else:
            output = self.run0('tree-version', '-d', self.path)
        self.treeversion = output.strip()

        # Get name of temporary directory
        version = self.treeversion.split('/')
        self.tmppath = os.path.join(tempfile.gettempdir(),
                                    'hg-%s' % version[1])

        # Generate parents dictionary
        self.parents[None] = []
        treeversion = self.treeversion
        child = None
        while treeversion:
            self.ui.status(_('analyzing tree version %s...\n') % treeversion)

            archive = treeversion.split('/')[0]
            if archive not in self.archives:
                self.ui.status(_('tree analysis stopped because it points to '
                                 'an unregistered archive %s...\n') % archive)
                break

            # Get the complete list of revisions for that tree version
            output, status = self.runlines('revisions', '-r', '-f', treeversion)
            self.checkexit(status, 'failed retrieving revisions for %s'
                           % treeversion)

            # No new iteration unless a revision has a continuation-of header
            treeversion = None

            for l in output:
                rev = l.strip()
                self.changes[rev] = self.gnuarch_rev(rev)
                self.parents[rev] = []

                # Read author, date and summary
                catlog, status = self.run('cat-log', '-d', self.path, rev)
                if status:
                    # Fall back to the archive copy of the log.
                    catlog = self.run0('cat-archive-log', rev)
                self._parsecatlog(catlog, rev)

                # Populate the parents map
                self.parents[child].append(rev)

                # Keep track of the current revision as the child of the next
                # revision scanned
                child = rev

                # Check if we have to follow the usual incremental history
                # or if we have to 'jump' to a different treeversion given
                # by the continuation-of header.
                if self.changes[rev].continuationof:
                    treeversion = '--'.join(
                        self.changes[rev].continuationof.split('--')[:-1])
                    break

                # If we reached a base-0 revision w/o any continuation-of
                # header, it means the tree history ends here.
                if rev[-6:] == 'base-0':
                    break

    def after(self):
        """Remove the temporary working tree created during conversion."""
        self.ui.debug('cleaning up %s\n' % self.tmppath)
        shutil.rmtree(self.tmppath, ignore_errors=True)

    def getheads(self):
        # Revisions recorded with no child are the heads.
        return self.parents[None]

    def getfile(self, name, rev):
        """Return (data, mode) for *name* at *rev*, or (None, None) if the
        file does not exist in the replayed tree.

        Only valid for the revision most recently applied by getchanges().
        """
        if rev != self.lastrev:
            raise error.Abort(_('internal calling inconsistency'))

        # lexists: a dangling symlink is still a file we must report.
        if not os.path.lexists(os.path.join(self.tmppath, name)):
            return None, None

        return self._getfile(name, rev)

    def getchanges(self, rev, full):
        """Replay *rev* and return (changes, copies, cleanp2) for it."""
        if full:
            raise error.Abort(_("convert from arch does not support --full"))
        self._update(rev)
        changes = []
        copies = {}

        for f in self.changes[rev].add_files:
            changes.append((f, rev))

        for f in self.changes[rev].mod_files:
            changes.append((f, rev))

        for f in self.changes[rev].del_files:
            changes.append((f, rev))

        # A rename touches both endpoints and records a copy.
        for src in self.changes[rev].ren_files:
            to = self.changes[rev].ren_files[src]
            changes.append((src, rev))
            changes.append((to, rev))
            copies[to] = src

        # Directory renames are expanded to their contained files.
        for src in self.changes[rev].ren_dirs:
            to = self.changes[rev].ren_dirs[src]
            chgs, cps = self._rendirchanges(src, to)
            changes += [(f, rev) for f in chgs]
            copies.update(cps)

        self.lastrev = rev
        return sorted(set(changes)), copies, set()

    def getcommit(self, rev):
        """Return the common.commit object for *rev*."""
        changes = self.changes[rev]
        return common.commit(author=changes.author, date=changes.date,
                             desc=changes.summary, parents=self.parents[rev],
                             rev=rev)

    def gettags(self):
        return self.tags

    def _execute(self, cmd, *args, **kwargs):
        """Run an Arch command discarding its output; return the exit code."""
        cmdline = [self.execmd, cmd]
        cmdline += args
        cmdline = [util.shellquote(arg) for arg in cmdline]
        cmdline += ['>', os.devnull, '2>', os.devnull]
        cmdline = util.quotecommand(' '.join(cmdline))
        self.ui.debug(cmdline, '\n')
        return os.system(cmdline)

    def _update(self, rev):
        """Bring the temporary tree to *rev*, incrementally if possible."""
        self.ui.debug('applying revision %s...\n' % rev)
        changeset, status = self.runlines('replay', '-d', self.tmppath,
                                          rev)
        if status:
            # Something went wrong while merging (baz or tla
            # issue?), get latest revision and try from there
            shutil.rmtree(self.tmppath, ignore_errors=True)
            self._obtainrevision(rev)
        else:
            old_rev = self.parents[rev][0]
            self.ui.debug('computing changeset between %s and %s...\n'
                          % (old_rev, rev))
            self._parsechangeset(changeset, rev)

    def _getfile(self, name, rev):
        """Read *name* from the replayed tree; return (data, mode).

        mode is 'l' for symlinks, 'x' for executables, '' otherwise.
        """
        mode = os.lstat(os.path.join(self.tmppath, name)).st_mode
        if stat.S_ISLNK(mode):
            # Symlink data is the link target itself.
            data = os.readlink(os.path.join(self.tmppath, name))
            if mode:
                mode = 'l'
            else:
                mode = ''
        else:
            data = open(os.path.join(self.tmppath, name), 'rb').read()
            mode = (mode & 0o111) and 'x' or ''
        return data, mode

    def _exclude(self, name):
        """Return True for paths inside GNU Arch control directories."""
        exclude = ['{arch}', '.arch-ids', '.arch-inventory']
        for exc in exclude:
            if name.find(exc) != -1:
                return True
        return False

    def _readcontents(self, path):
        """List files under *path*, skipping Arch control directories."""
        files = []
        contents = os.listdir(path)
        while len(contents) > 0:
            c = contents.pop()
            p = os.path.join(path, c)
            # os.walk could be used, but here we avoid internal GNU
            # Arch files and directories, thus saving a lot of time.
            if not self._exclude(p):
                if os.path.isdir(p):
                    contents += [os.path.join(c, f) for f in os.listdir(p)]
                else:
                    files.append(c)
        return files

    def _rendirchanges(self, src, dest):
        """Expand a directory rename into per-file changes and copies."""
        changes = []
        copies = {}
        files = self._readcontents(os.path.join(self.tmppath, dest))
        for f in files:
            s = os.path.join(src, f)
            d = os.path.join(dest, f)
            changes.append(s)
            changes.append(d)
            copies[d] = s
        return changes, copies

    def _obtainrevision(self, rev):
        """Fetch *rev* from scratch into the temporary tree ('get')."""
        self.ui.debug('obtaining revision %s...\n' % rev)
        output = self._execute('get', rev, self.tmppath)
        self.checkexit(output)
        self.ui.debug('analyzing revision %s...\n' % rev)
        # With no incremental changeset, every file counts as added.
        files = self._readcontents(self.tmppath)
        self.changes[rev].add_files += files

    def _stripbasepath(self, path):
        # Changeset output prefixes paths with './'.
        if path.startswith('./'):
            return path[2:]
        return path

    def _parsecatlog(self, data, rev):
        """Parse cat-log output (RFC 2822-like headers) for *rev*.

        Fills in date, author, summary and continuation-of on the
        gnuarch_rev record. Raises error.Abort on any parse failure.
        """
        try:
            catlog = self.catlogparser.parsestr(data)

            # Commit date
            self.changes[rev].date = util.datestr(
                util.strdate(catlog['Standard-date'],
                             '%Y-%m-%d %H:%M:%S'))

            # Commit author
            self.changes[rev].author = self.recode(catlog['Creator'])

            # Commit description
            self.changes[rev].summary = '\n\n'.join((catlog['Summary'],
                                                     catlog.get_payload()))
            self.changes[rev].summary = self.recode(self.changes[rev].summary)

            # Commit revision origin when dealing with a branch or tag
            if 'Continuation-of' in catlog:
                self.changes[rev].continuationof = self.recode(
                    catlog['Continuation-of'])
        except Exception:
            raise error.Abort(_('could not parse cat-log of %s') % rev)

    def _parsechangeset(self, data, rev):
        """Parse 'replay' changeset lines and record file changes for *rev*.

        Each line starts with a status code: A (add), D (delete), M/Mb/M->
        (modify), ch (file<->link conversion), => (file rename), /> (dir
        rename). Rename lines list source and destination separated by
        spaces, or by a tab when either path contains spaces.
        """
        for l in data:
            l = l.strip()
            # Added file (ignore added directory)
            if l.startswith('A') and not l.startswith('A/'):
                file = self._stripbasepath(l[1:].strip())
                if not self._exclude(file):
                    self.changes[rev].add_files.append(file)
            # Deleted file (ignore deleted directory)
            elif l.startswith('D') and not l.startswith('D/'):
                file = self._stripbasepath(l[1:].strip())
                if not self._exclude(file):
                    self.changes[rev].del_files.append(file)
            # Modified binary file
            elif l.startswith('Mb'):
                file = self._stripbasepath(l[2:].strip())
                if not self._exclude(file):
                    self.changes[rev].mod_files.append(file)
            # Modified link
            elif l.startswith('M->'):
                file = self._stripbasepath(l[3:].strip())
                if not self._exclude(file):
                    self.changes[rev].mod_files.append(file)
            # Modified file
            elif l.startswith('M'):
                file = self._stripbasepath(l[1:].strip())
                if not self._exclude(file):
                    self.changes[rev].mod_files.append(file)
            # Renamed file (or link)
            elif l.startswith('=>'):
                files = l[2:].strip().split(' ')
                if len(files) == 1:
                    files = l[2:].strip().split('\t')
                src = self._stripbasepath(files[0])
                dst = self._stripbasepath(files[1])
                if not self._exclude(src) and not self._exclude(dst):
                    self.changes[rev].ren_files[src] = dst
            # Conversion from file to link or from link to file (modified)
            elif l.startswith('ch'):
                file = self._stripbasepath(l[2:].strip())
                if not self._exclude(file):
                    self.changes[rev].mod_files.append(file)
            # Renamed directory
            elif l.startswith('/>'):
                dirs = l[2:].strip().split(' ')
                if len(dirs) == 1:
                    dirs = l[2:].strip().split('\t')
                src = self._stripbasepath(dirs[0])
                dst = self._stripbasepath(dirs[1])
                if not self._exclude(src) and not self._exclude(dst):
                    self.changes[rev].ren_dirs[src] = dst
@@ -1,484 +1,485 b''
1 # notify.py - email notifications for mercurial
1 # notify.py - email notifications for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 '''hooks for sending email push notifications
8 '''hooks for sending email push notifications
9
9
10 This extension implements hooks to send email notifications when
10 This extension implements hooks to send email notifications when
11 changesets are sent from or received by the local repository.
11 changesets are sent from or received by the local repository.
12
12
13 First, enable the extension as explained in :hg:`help extensions`, and
13 First, enable the extension as explained in :hg:`help extensions`, and
14 register the hook you want to run. ``incoming`` and ``changegroup`` hooks
14 register the hook you want to run. ``incoming`` and ``changegroup`` hooks
15 are run when changesets are received, while ``outgoing`` hooks are for
15 are run when changesets are received, while ``outgoing`` hooks are for
16 changesets sent to another repository::
16 changesets sent to another repository::
17
17
18 [hooks]
18 [hooks]
19 # one email for each incoming changeset
19 # one email for each incoming changeset
20 incoming.notify = python:hgext.notify.hook
20 incoming.notify = python:hgext.notify.hook
21 # one email for all incoming changesets
21 # one email for all incoming changesets
22 changegroup.notify = python:hgext.notify.hook
22 changegroup.notify = python:hgext.notify.hook
23
23
24 # one email for all outgoing changesets
24 # one email for all outgoing changesets
25 outgoing.notify = python:hgext.notify.hook
25 outgoing.notify = python:hgext.notify.hook
26
26
27 This registers the hooks. To enable notification, subscribers must
27 This registers the hooks. To enable notification, subscribers must
28 be assigned to repositories. The ``[usersubs]`` section maps multiple
28 be assigned to repositories. The ``[usersubs]`` section maps multiple
29 repositories to a given recipient. The ``[reposubs]`` section maps
29 repositories to a given recipient. The ``[reposubs]`` section maps
30 multiple recipients to a single repository::
30 multiple recipients to a single repository::
31
31
32 [usersubs]
32 [usersubs]
33 # key is subscriber email, value is a comma-separated list of repo patterns
33 # key is subscriber email, value is a comma-separated list of repo patterns
34 user@host = pattern
34 user@host = pattern
35
35
36 [reposubs]
36 [reposubs]
37 # key is repo pattern, value is a comma-separated list of subscriber emails
37 # key is repo pattern, value is a comma-separated list of subscriber emails
38 pattern = user@host
38 pattern = user@host
39
39
40 A ``pattern`` is a ``glob`` matching the absolute path to a repository,
40 A ``pattern`` is a ``glob`` matching the absolute path to a repository,
41 optionally combined with a revset expression. A revset expression, if
41 optionally combined with a revset expression. A revset expression, if
42 present, is separated from the glob by a hash. Example::
42 present, is separated from the glob by a hash. Example::
43
43
44 [reposubs]
44 [reposubs]
45 */widgets#branch(release) = qa-team@example.com
45 */widgets#branch(release) = qa-team@example.com
46
46
47 This sends to ``qa-team@example.com`` whenever a changeset on the ``release``
47 This sends to ``qa-team@example.com`` whenever a changeset on the ``release``
48 branch triggers a notification in any repository ending in ``widgets``.
48 branch triggers a notification in any repository ending in ``widgets``.
49
49
50 In order to place them under direct user management, ``[usersubs]`` and
50 In order to place them under direct user management, ``[usersubs]`` and
51 ``[reposubs]`` sections may be placed in a separate ``hgrc`` file and
51 ``[reposubs]`` sections may be placed in a separate ``hgrc`` file and
52 incorporated by reference::
52 incorporated by reference::
53
53
54 [notify]
54 [notify]
55 config = /path/to/subscriptionsfile
55 config = /path/to/subscriptionsfile
56
56
57 Notifications will not be sent until the ``notify.test`` value is set
57 Notifications will not be sent until the ``notify.test`` value is set
58 to ``False``; see below.
58 to ``False``; see below.
59
59
60 Notifications content can be tweaked with the following configuration entries:
60 Notifications content can be tweaked with the following configuration entries:
61
61
62 notify.test
62 notify.test
63 If ``True``, print messages to stdout instead of sending them. Default: True.
63 If ``True``, print messages to stdout instead of sending them. Default: True.
64
64
65 notify.sources
65 notify.sources
66 Space-separated list of change sources. Notifications are activated only
66 Space-separated list of change sources. Notifications are activated only
67 when a changeset's source is in this list. Sources may be:
67 when a changeset's source is in this list. Sources may be:
68
68
69 :``serve``: changesets received via http or ssh
69 :``serve``: changesets received via http or ssh
70 :``pull``: changesets received via ``hg pull``
70 :``pull``: changesets received via ``hg pull``
71 :``unbundle``: changesets received via ``hg unbundle``
71 :``unbundle``: changesets received via ``hg unbundle``
72 :``push``: changesets sent or received via ``hg push``
72 :``push``: changesets sent or received via ``hg push``
73 :``bundle``: changesets sent via ``hg unbundle``
73 :``bundle``: changesets sent via ``hg unbundle``
74
74
75 Default: serve.
75 Default: serve.
76
76
77 notify.strip
77 notify.strip
78 Number of leading slashes to strip from url paths. By default, notifications
78 Number of leading slashes to strip from url paths. By default, notifications
79 reference repositories with their absolute path. ``notify.strip`` lets you
79 reference repositories with their absolute path. ``notify.strip`` lets you
80 turn them into relative paths. For example, ``notify.strip=3`` will change
80 turn them into relative paths. For example, ``notify.strip=3`` will change
81 ``/long/path/repository`` into ``repository``. Default: 0.
81 ``/long/path/repository`` into ``repository``. Default: 0.
82
82
83 notify.domain
83 notify.domain
84 Default email domain for sender or recipients with no explicit domain.
84 Default email domain for sender or recipients with no explicit domain.
85
85
86 notify.style
86 notify.style
87 Style file to use when formatting emails.
87 Style file to use when formatting emails.
88
88
89 notify.template
89 notify.template
90 Template to use when formatting emails.
90 Template to use when formatting emails.
91
91
92 notify.incoming
92 notify.incoming
93 Template to use when run as an incoming hook, overriding ``notify.template``.
93 Template to use when run as an incoming hook, overriding ``notify.template``.
94
94
95 notify.outgoing
95 notify.outgoing
96 Template to use when run as an outgoing hook, overriding ``notify.template``.
96 Template to use when run as an outgoing hook, overriding ``notify.template``.
97
97
98 notify.changegroup
98 notify.changegroup
99 Template to use when running as a changegroup hook, overriding
99 Template to use when running as a changegroup hook, overriding
100 ``notify.template``.
100 ``notify.template``.
101
101
102 notify.maxdiff
102 notify.maxdiff
103 Maximum number of diff lines to include in notification email. Set to 0
103 Maximum number of diff lines to include in notification email. Set to 0
104 to disable the diff, or -1 to include all of it. Default: 300.
104 to disable the diff, or -1 to include all of it. Default: 300.
105
105
106 notify.maxsubject
106 notify.maxsubject
107 Maximum number of characters in email's subject line. Default: 67.
107 Maximum number of characters in email's subject line. Default: 67.
108
108
109 notify.diffstat
109 notify.diffstat
110 Set to True to include a diffstat before diff content. Default: True.
110 Set to True to include a diffstat before diff content. Default: True.
111
111
112 notify.merge
112 notify.merge
113 If True, send notifications for merge changesets. Default: True.
113 If True, send notifications for merge changesets. Default: True.
114
114
115 notify.mbox
115 notify.mbox
116 If set, append mails to this mbox file instead of sending. Default: None.
116 If set, append mails to this mbox file instead of sending. Default: None.
117
117
118 notify.fromauthor
118 notify.fromauthor
119 If set, use the committer of the first changeset in a changegroup for
119 If set, use the committer of the first changeset in a changegroup for
120 the "From" field of the notification mail. If not set, take the user
120 the "From" field of the notification mail. If not set, take the user
121 from the pushing repo. Default: False.
121 from the pushing repo. Default: False.
122
122
123 If set, the following entries will also be used to customize the
123 If set, the following entries will also be used to customize the
124 notifications:
124 notifications:
125
125
126 email.from
126 email.from
127 Email ``From`` address to use if none can be found in the generated
127 Email ``From`` address to use if none can be found in the generated
128 email content.
128 email content.
129
129
130 web.baseurl
130 web.baseurl
131 Root repository URL to combine with repository paths when making
131 Root repository URL to combine with repository paths when making
132 references. See also ``notify.strip``.
132 references. See also ``notify.strip``.
133
133
134 '''
134 '''
135 from __future__ import absolute_import
135 from __future__ import absolute_import
136
136
137 import email
137 import email
138 import email.parser as emailparser
138 import fnmatch
139 import fnmatch
139 import socket
140 import socket
140 import time
141 import time
141
142
142 from mercurial.i18n import _
143 from mercurial.i18n import _
143 from mercurial import (
144 from mercurial import (
144 cmdutil,
145 cmdutil,
145 error,
146 error,
146 mail,
147 mail,
147 patch,
148 patch,
148 registrar,
149 registrar,
149 util,
150 util,
150 )
151 )
151
152
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

# register every notify.* option together with its default value
configitem('notify', 'changegroup', default=None)
configitem('notify', 'config', default=None)
configitem('notify', 'diffstat', default=True)
configitem('notify', 'domain', default=None)
configitem('notify', 'fromauthor', default=None)
configitem('notify', 'incoming', default=None)
configitem('notify', 'maxdiff', default=300)
configitem('notify', 'maxsubject', default=67)
configitem('notify', 'mbox', default=None)
configitem('notify', 'merge', default=True)
configitem('notify', 'outgoing', default=None)
configitem('notify', 'sources', default='serve')
configitem('notify', 'strip', default=0)
configitem('notify', 'style', default=None)
configitem('notify', 'template', default=None)
configitem('notify', 'test', default=True)
209
210
# Template used when notifying about a single changeset; it may carry
# its own email headers (Subject/From), which send() will honor.
single_template = '''
Subject: changeset in {webroot}: {desc|firstline|strip}
From: {author}

changeset {node|short} in {root}
details: {baseurl}{webroot}?cmd=changeset;node={node|short}
description:
\t{desc|tabindent|strip}
'''.lstrip()

# Template used when several changesets are reported in one mail; it
# must not carry email headers, because only the first set of headers
# would be used and the result would look strange.
multiple_template = '''
changeset {node|short} in {root}
details: {baseurl}{webroot}?cmd=changeset;node={node|short}
summary: {desc|firstline}
'''

# default template per hook type
deftemplates = {
    'changegroup': multiple_template,
}
233
234
class notifier(object):
    '''email notification class.'''

    def __init__(self, ui, repo, hooktype):
        self.ui = ui
        cfgpath = self.ui.config('notify', 'config')
        if cfgpath:
            # subscriber lists may live in a separate config file
            self.ui.readconfig(cfgpath, sections=['usersubs', 'reposubs'])
        self.repo = repo
        self.stripcount = int(self.ui.config('notify', 'strip'))
        self.root = self.strip(self.repo.root)
        self.domain = self.ui.config('notify', 'domain')
        self.mbox = self.ui.config('notify', 'mbox')
        self.test = self.ui.configbool('notify', 'test')
        self.charsets = mail._charsets(self.ui)
        self.subs = self.subscribers()
        self.merge = self.ui.configbool('notify', 'merge')

        # a hook-specific template wins over the generic one; a style
        # file is only consulted when no template is configured at all
        template = (self.ui.config('notify', hooktype) or
                    self.ui.config('notify', 'template'))
        mapfile = None
        if not template:
            mapfile = self.ui.config('notify', 'style')
        if not mapfile and not template:
            template = deftemplates.get(hooktype) or single_template
        spec = cmdutil.logtemplatespec(template, mapfile)
        self.t = cmdutil.changeset_templater(self.ui, self.repo, spec,
                                             False, None, False)
262
263
263 def strip(self, path):
264 def strip(self, path):
264 '''strip leading slashes from local path, turn into web-safe path.'''
265 '''strip leading slashes from local path, turn into web-safe path.'''
265
266
266 path = util.pconvert(path)
267 path = util.pconvert(path)
267 count = self.stripcount
268 count = self.stripcount
268 while count > 0:
269 while count > 0:
269 c = path.find('/')
270 c = path.find('/')
270 if c == -1:
271 if c == -1:
271 break
272 break
272 path = path[c + 1:]
273 path = path[c + 1:]
273 count -= 1
274 count -= 1
274 return path
275 return path
275
276
276 def fixmail(self, addr):
277 def fixmail(self, addr):
277 '''try to clean up email addresses.'''
278 '''try to clean up email addresses.'''
278
279
279 addr = util.email(addr.strip())
280 addr = util.email(addr.strip())
280 if self.domain:
281 if self.domain:
281 a = addr.find('@localhost')
282 a = addr.find('@localhost')
282 if a != -1:
283 if a != -1:
283 addr = addr[:a]
284 addr = addr[:a]
284 if '@' not in addr:
285 if '@' not in addr:
285 return addr + '@' + self.domain
286 return addr + '@' + self.domain
286 return addr
287 return addr
287
288
288 def subscribers(self):
289 def subscribers(self):
289 '''return list of email addresses of subscribers to this repo.'''
290 '''return list of email addresses of subscribers to this repo.'''
290 subs = set()
291 subs = set()
291 for user, pats in self.ui.configitems('usersubs'):
292 for user, pats in self.ui.configitems('usersubs'):
292 for pat in pats.split(','):
293 for pat in pats.split(','):
293 if '#' in pat:
294 if '#' in pat:
294 pat, revs = pat.split('#', 1)
295 pat, revs = pat.split('#', 1)
295 else:
296 else:
296 revs = None
297 revs = None
297 if fnmatch.fnmatch(self.repo.root, pat.strip()):
298 if fnmatch.fnmatch(self.repo.root, pat.strip()):
298 subs.add((self.fixmail(user), revs))
299 subs.add((self.fixmail(user), revs))
299 for pat, users in self.ui.configitems('reposubs'):
300 for pat, users in self.ui.configitems('reposubs'):
300 if '#' in pat:
301 if '#' in pat:
301 pat, revs = pat.split('#', 1)
302 pat, revs = pat.split('#', 1)
302 else:
303 else:
303 revs = None
304 revs = None
304 if fnmatch.fnmatch(self.repo.root, pat):
305 if fnmatch.fnmatch(self.repo.root, pat):
305 for user in users.split(','):
306 for user in users.split(','):
306 subs.add((self.fixmail(user), revs))
307 subs.add((self.fixmail(user), revs))
307 return [(mail.addressencode(self.ui, s, self.charsets, self.test), r)
308 return [(mail.addressencode(self.ui, s, self.charsets, self.test), r)
308 for s, r in sorted(subs)]
309 for s, r in sorted(subs)]
309
310
310 def node(self, ctx, **props):
311 def node(self, ctx, **props):
311 '''format one changeset, unless it is a suppressed merge.'''
312 '''format one changeset, unless it is a suppressed merge.'''
312 if not self.merge and len(ctx.parents()) > 1:
313 if not self.merge and len(ctx.parents()) > 1:
313 return False
314 return False
314 self.t.show(ctx, changes=ctx.changeset(),
315 self.t.show(ctx, changes=ctx.changeset(),
315 baseurl=self.ui.config('web', 'baseurl'),
316 baseurl=self.ui.config('web', 'baseurl'),
316 root=self.repo.root, webroot=self.root, **props)
317 root=self.repo.root, webroot=self.root, **props)
317 return True
318 return True
318
319
319 def skipsource(self, source):
320 def skipsource(self, source):
320 '''true if incoming changes from this source should be skipped.'''
321 '''true if incoming changes from this source should be skipped.'''
321 ok_sources = self.ui.config('notify', 'sources').split()
322 ok_sources = self.ui.config('notify', 'sources').split()
322 return source not in ok_sources
323 return source not in ok_sources
323
324
324 def send(self, ctx, count, data):
325 def send(self, ctx, count, data):
325 '''send message.'''
326 '''send message.'''
326
327
327 # Select subscribers by revset
328 # Select subscribers by revset
328 subs = set()
329 subs = set()
329 for sub, spec in self.subs:
330 for sub, spec in self.subs:
330 if spec is None:
331 if spec is None:
331 subs.add(sub)
332 subs.add(sub)
332 continue
333 continue
333 revs = self.repo.revs('%r and %d:', spec, ctx.rev())
334 revs = self.repo.revs('%r and %d:', spec, ctx.rev())
334 if len(revs):
335 if len(revs):
335 subs.add(sub)
336 subs.add(sub)
336 continue
337 continue
337 if len(subs) == 0:
338 if len(subs) == 0:
338 self.ui.debug('notify: no subscribers to selected repo '
339 self.ui.debug('notify: no subscribers to selected repo '
339 'and revset\n')
340 'and revset\n')
340 return
341 return
341
342
342 p = email.Parser.Parser()
343 p = emailparser.Parser()
343 try:
344 try:
344 msg = p.parsestr(data)
345 msg = p.parsestr(data)
345 except email.Errors.MessageParseError as inst:
346 except email.Errors.MessageParseError as inst:
346 raise error.Abort(inst)
347 raise error.Abort(inst)
347
348
348 # store sender and subject
349 # store sender and subject
349 sender, subject = msg['From'], msg['Subject']
350 sender, subject = msg['From'], msg['Subject']
350 del msg['From'], msg['Subject']
351 del msg['From'], msg['Subject']
351
352
352 if not msg.is_multipart():
353 if not msg.is_multipart():
353 # create fresh mime message from scratch
354 # create fresh mime message from scratch
354 # (multipart templates must take care of this themselves)
355 # (multipart templates must take care of this themselves)
355 headers = msg.items()
356 headers = msg.items()
356 payload = msg.get_payload()
357 payload = msg.get_payload()
357 # for notification prefer readability over data precision
358 # for notification prefer readability over data precision
358 msg = mail.mimeencode(self.ui, payload, self.charsets, self.test)
359 msg = mail.mimeencode(self.ui, payload, self.charsets, self.test)
359 # reinstate custom headers
360 # reinstate custom headers
360 for k, v in headers:
361 for k, v in headers:
361 msg[k] = v
362 msg[k] = v
362
363
363 msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2")
364 msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2")
364
365
365 # try to make subject line exist and be useful
366 # try to make subject line exist and be useful
366 if not subject:
367 if not subject:
367 if count > 1:
368 if count > 1:
368 subject = _('%s: %d new changesets') % (self.root, count)
369 subject = _('%s: %d new changesets') % (self.root, count)
369 else:
370 else:
370 s = ctx.description().lstrip().split('\n', 1)[0].rstrip()
371 s = ctx.description().lstrip().split('\n', 1)[0].rstrip()
371 subject = '%s: %s' % (self.root, s)
372 subject = '%s: %s' % (self.root, s)
372 maxsubject = int(self.ui.config('notify', 'maxsubject'))
373 maxsubject = int(self.ui.config('notify', 'maxsubject'))
373 if maxsubject:
374 if maxsubject:
374 subject = util.ellipsis(subject, maxsubject)
375 subject = util.ellipsis(subject, maxsubject)
375 msg['Subject'] = mail.headencode(self.ui, subject,
376 msg['Subject'] = mail.headencode(self.ui, subject,
376 self.charsets, self.test)
377 self.charsets, self.test)
377
378
378 # try to make message have proper sender
379 # try to make message have proper sender
379 if not sender:
380 if not sender:
380 sender = self.ui.config('email', 'from') or self.ui.username()
381 sender = self.ui.config('email', 'from') or self.ui.username()
381 if '@' not in sender or '@localhost' in sender:
382 if '@' not in sender or '@localhost' in sender:
382 sender = self.fixmail(sender)
383 sender = self.fixmail(sender)
383 msg['From'] = mail.addressencode(self.ui, sender,
384 msg['From'] = mail.addressencode(self.ui, sender,
384 self.charsets, self.test)
385 self.charsets, self.test)
385
386
386 msg['X-Hg-Notification'] = 'changeset %s' % ctx
387 msg['X-Hg-Notification'] = 'changeset %s' % ctx
387 if not msg['Message-Id']:
388 if not msg['Message-Id']:
388 msg['Message-Id'] = ('<hg.%s.%s.%s@%s>' %
389 msg['Message-Id'] = ('<hg.%s.%s.%s@%s>' %
389 (ctx, int(time.time()),
390 (ctx, int(time.time()),
390 hash(self.repo.root), socket.getfqdn()))
391 hash(self.repo.root), socket.getfqdn()))
391 msg['To'] = ', '.join(sorted(subs))
392 msg['To'] = ', '.join(sorted(subs))
392
393
393 msgtext = msg.as_string()
394 msgtext = msg.as_string()
394 if self.test:
395 if self.test:
395 self.ui.write(msgtext)
396 self.ui.write(msgtext)
396 if not msgtext.endswith('\n'):
397 if not msgtext.endswith('\n'):
397 self.ui.write('\n')
398 self.ui.write('\n')
398 else:
399 else:
399 self.ui.status(_('notify: sending %d subscribers %d changes\n') %
400 self.ui.status(_('notify: sending %d subscribers %d changes\n') %
400 (len(subs), count))
401 (len(subs), count))
401 mail.sendmail(self.ui, util.email(msg['From']),
402 mail.sendmail(self.ui, util.email(msg['From']),
402 subs, msgtext, mbox=self.mbox)
403 subs, msgtext, mbox=self.mbox)
403
404
404 def diff(self, ctx, ref=None):
405 def diff(self, ctx, ref=None):
405
406
406 maxdiff = int(self.ui.config('notify', 'maxdiff'))
407 maxdiff = int(self.ui.config('notify', 'maxdiff'))
407 prev = ctx.p1().node()
408 prev = ctx.p1().node()
408 if ref:
409 if ref:
409 ref = ref.node()
410 ref = ref.node()
410 else:
411 else:
411 ref = ctx.node()
412 ref = ctx.node()
412 chunks = patch.diff(self.repo, prev, ref,
413 chunks = patch.diff(self.repo, prev, ref,
413 opts=patch.diffallopts(self.ui))
414 opts=patch.diffallopts(self.ui))
414 difflines = ''.join(chunks).splitlines()
415 difflines = ''.join(chunks).splitlines()
415
416
416 if self.ui.configbool('notify', 'diffstat'):
417 if self.ui.configbool('notify', 'diffstat'):
417 s = patch.diffstat(difflines)
418 s = patch.diffstat(difflines)
418 # s may be nil, don't include the header if it is
419 # s may be nil, don't include the header if it is
419 if s:
420 if s:
420 self.ui.write(_('\ndiffstat:\n\n%s') % s)
421 self.ui.write(_('\ndiffstat:\n\n%s') % s)
421
422
422 if maxdiff == 0:
423 if maxdiff == 0:
423 return
424 return
424 elif maxdiff > 0 and len(difflines) > maxdiff:
425 elif maxdiff > 0 and len(difflines) > maxdiff:
425 msg = _('\ndiffs (truncated from %d to %d lines):\n\n')
426 msg = _('\ndiffs (truncated from %d to %d lines):\n\n')
426 self.ui.write(msg % (len(difflines), maxdiff))
427 self.ui.write(msg % (len(difflines), maxdiff))
427 difflines = difflines[:maxdiff]
428 difflines = difflines[:maxdiff]
428 elif difflines:
429 elif difflines:
429 self.ui.write(_('\ndiffs (%d lines):\n\n') % len(difflines))
430 self.ui.write(_('\ndiffs (%d lines):\n\n') % len(difflines))
430
431
431 self.ui.write("\n".join(difflines))
432 self.ui.write("\n".join(difflines))
432
433
def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
    '''send email notifications to interested subscribers.

    if used as changegroup hook, send one email for all changesets in
    changegroup. else send one email per changeset.'''

    mailer = notifier(ui, repo, hooktype)
    ctx = repo[node]

    if not mailer.subs:
        ui.debug('notify: no subscribers to repository %s\n' % mailer.root)
        return
    if mailer.skipsource(source):
        ui.debug('notify: changes have source "%s" - skipping\n' % source)
        return

    ui.pushbuffer()
    data = ''
    count = 0
    author = ''
    if hooktype in ('changegroup', 'outgoing'):
        # one mail for every changeset from ctx to tip
        for rev in xrange(ctx.rev(), len(repo)):
            if mailer.node(repo[rev]):
                count += 1
                if not author:
                    author = repo[rev].user()
            else:
                # suppressed merge: drop what was buffered for it
                data += ui.popbuffer()
                ui.note(_('notify: suppressing notification for merge %d:%s\n')
                        % (rev, repo[rev].hex()[:12]))
                ui.pushbuffer()
        if count:
            mailer.diff(ctx, repo['tip'])
    else:
        if not mailer.node(ctx):
            ui.popbuffer()
            ui.note(_('notify: suppressing notification for merge %d:%s\n') %
                    (ctx.rev(), ctx.hex()[:12]))
            return
        count += 1
        mailer.diff(ctx)
        if not author:
            author = ctx.user()

    data += ui.popbuffer()
    fromauthor = ui.config('notify', 'fromauthor')
    if author and fromauthor:
        # credit the first committer rather than the pushing user
        data = '\n'.join(['From: %s' % author, data])

    if count:
        mailer.send(ctx, count, data)
@@ -1,2900 +1,2901 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
from __future__ import absolute_import, print_function

import collections
import copy
import difflib
import email
import email.generator as emailgenerator
import email.parser as emailparser
import errno
import hashlib
import os
import posixpath
import re
import shutil
import tempfile
import zlib

from .i18n import _
from .node import (
    hex,
    short,
)
from . import (
    copies,
    encoding,
    error,
    mail,
    mdiff,
    pathutil,
    policy,
    pycompat,
    scmutil,
    similar,
    util,
    vfs as vfsmod,
)
43
44
# C implementation of the diff helpers, loaded through the policy layer
diffhelpers = policy.importmod(r'diffhelpers')
stringio = util.stringio

# header line of a git-style diff: captures the two file names
gitre = re.compile(br'diff --git a/(.*) b/(.*)')
# splits a line into alternating runs of tabs and non-tabs
tabsplitter = re.compile(br'(\t+|[^\t]+)')
# matches any byte that cannot appear in an unquoted word
_nonwordre = re.compile(br'([^a-zA-Z0-9_\x80-\xff])')

PatchError = error.PatchError
52
53
53 # public functions
54 # public functions
54
55
def split(stream):
    '''return an iterator of individual patches from a stream

    Detects the container format (hg export, mbox, MIME mail, headers +
    diff, or plain patch) from the first lines and dispatches to the
    matching splitter.
    '''
    def isheader(line, inheader):
        if inheader and line[0] in (' ', '\t'):
            # continuation
            return True
        if line[0] in (' ', '-', '+'):
            # diff line - don't check for header pattern in there
            return False
        l = line.split(': ', 1)
        return len(l) == 2 and ' ' not in l[0]

    def chunk(lines):
        return stringio(''.join(lines))

    def hgsplit(stream, cur):
        inheader = True

        for line in stream:
            if not line.strip():
                inheader = False
            if not inheader and line.startswith('# HG changeset patch'):
                yield chunk(cur)
                cur = []
                inheader = True

            cur.append(line)

        if cur:
            yield chunk(cur)

    def mboxsplit(stream, cur):
        for line in stream:
            if line.startswith('From '):
                # recurse on each mbox member minus its 'From ' line
                for c in split(chunk(cur[1:])):
                    yield c
                cur = []

            cur.append(line)

        if cur:
            for c in split(chunk(cur[1:])):
                yield c

    def mimesplit(stream, cur):
        def msgfp(m):
            fp = stringio()
            # email.Generator.Generator is the Python 2-only capitalized
            # alias; use the py2/py3 email.generator module instead,
            # matching the emailparser usage below.
            g = emailgenerator.Generator(fp, mangle_from_=False)
            g.flatten(m)
            fp.seek(0)
            return fp

        for line in stream:
            cur.append(line)
        c = chunk(cur)

        m = emailparser.Parser().parse(c)
        if not m.is_multipart():
            yield msgfp(m)
        else:
            ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
            for part in m.walk():
                ct = part.get_content_type()
                if ct not in ok_types:
                    continue
                yield msgfp(part)

    def headersplit(stream, cur):
        inheader = False

        for line in stream:
            if not inheader and isheader(line, inheader):
                yield chunk(cur)
                cur = []
                inheader = True
            if inheader and not isheader(line, inheader):
                inheader = False

            cur.append(line)

        if cur:
            yield chunk(cur)

    def remainder(cur):
        yield chunk(cur)

    class fiter(object):
        def __init__(self, fp):
            self.fp = fp

        def __iter__(self):
            return self

        def next(self):
            l = self.fp.readline()
            if not l:
                raise StopIteration
            return l

        __next__ = next

    inheader = False
    cur = []

    mimeheaders = ['content-type']

    if not util.safehasattr(stream, 'next'):
        # http responses, for example, have readline but not next
        stream = fiter(stream)

    for line in stream:
        cur.append(line)
        if line.startswith('# HG changeset patch'):
            return hgsplit(stream, cur)
        elif line.startswith('From '):
            return mboxsplit(stream, cur)
        elif isheader(line, inheader):
            inheader = True
            if line.split(':', 1)[0].lower() in mimeheaders:
                # let email parser handle this
                return mimesplit(stream, cur)
        elif line.startswith('--- ') and inheader:
            # No evil headers seen by diff start, split by hand
            return headersplit(stream, cur)
        # Not enough info, keep reading

    # if we are here, we have a very plain patch
    return remainder(cur)
183
184
## Some facility for extensible patch parsing:
# list of pairs ("header to match", "data key")
patchheadermap = [
    ('Date', 'date'),
    ('Branch', 'branch'),
    ('Node ID', 'nodeid'),
]
190
191
191 def extract(ui, fileobj):
192 def extract(ui, fileobj):
192 '''extract patch from data read from fileobj.
193 '''extract patch from data read from fileobj.
193
194
194 patch can be a normal patch or contained in an email message.
195 patch can be a normal patch or contained in an email message.
195
196
196 return a dictionary. Standard keys are:
197 return a dictionary. Standard keys are:
197 - filename,
198 - filename,
198 - message,
199 - message,
199 - user,
200 - user,
200 - date,
201 - date,
201 - branch,
202 - branch,
202 - node,
203 - node,
203 - p1,
204 - p1,
204 - p2.
205 - p2.
205 Any item can be missing from the dictionary. If filename is missing,
206 Any item can be missing from the dictionary. If filename is missing,
206 fileobj did not contain a patch. Caller must unlink filename when done.'''
207 fileobj did not contain a patch. Caller must unlink filename when done.'''
207
208
208 # attempt to detect the start of a patch
209 # attempt to detect the start of a patch
209 # (this heuristic is borrowed from quilt)
210 # (this heuristic is borrowed from quilt)
210 diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
211 diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
211 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
212 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
212 br'---[ \t].*?^\+\+\+[ \t]|'
213 br'---[ \t].*?^\+\+\+[ \t]|'
213 br'\*\*\*[ \t].*?^---[ \t])',
214 br'\*\*\*[ \t].*?^---[ \t])',
214 re.MULTILINE | re.DOTALL)
215 re.MULTILINE | re.DOTALL)
215
216
216 data = {}
217 data = {}
217 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
218 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
218 tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
219 tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
219 try:
220 try:
220 msg = email.Parser.Parser().parse(fileobj)
221 msg = emailparser.Parser().parse(fileobj)
221
222
222 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
223 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
223 data['user'] = msg['From'] and mail.headdecode(msg['From'])
224 data['user'] = msg['From'] and mail.headdecode(msg['From'])
224 if not subject and not data['user']:
225 if not subject and not data['user']:
225 # Not an email, restore parsed headers if any
226 # Not an email, restore parsed headers if any
226 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
227 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
227
228
228 # should try to parse msg['Date']
229 # should try to parse msg['Date']
229 parents = []
230 parents = []
230
231
231 if subject:
232 if subject:
232 if subject.startswith('[PATCH'):
233 if subject.startswith('[PATCH'):
233 pend = subject.find(']')
234 pend = subject.find(']')
234 if pend >= 0:
235 if pend >= 0:
235 subject = subject[pend + 1:].lstrip()
236 subject = subject[pend + 1:].lstrip()
236 subject = re.sub(br'\n[ \t]+', ' ', subject)
237 subject = re.sub(br'\n[ \t]+', ' ', subject)
237 ui.debug('Subject: %s\n' % subject)
238 ui.debug('Subject: %s\n' % subject)
238 if data['user']:
239 if data['user']:
239 ui.debug('From: %s\n' % data['user'])
240 ui.debug('From: %s\n' % data['user'])
240 diffs_seen = 0
241 diffs_seen = 0
241 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
242 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
242 message = ''
243 message = ''
243 for part in msg.walk():
244 for part in msg.walk():
244 content_type = part.get_content_type()
245 content_type = part.get_content_type()
245 ui.debug('Content-Type: %s\n' % content_type)
246 ui.debug('Content-Type: %s\n' % content_type)
246 if content_type not in ok_types:
247 if content_type not in ok_types:
247 continue
248 continue
248 payload = part.get_payload(decode=True)
249 payload = part.get_payload(decode=True)
249 m = diffre.search(payload)
250 m = diffre.search(payload)
250 if m:
251 if m:
251 hgpatch = False
252 hgpatch = False
252 hgpatchheader = False
253 hgpatchheader = False
253 ignoretext = False
254 ignoretext = False
254
255
255 ui.debug('found patch at byte %d\n' % m.start(0))
256 ui.debug('found patch at byte %d\n' % m.start(0))
256 diffs_seen += 1
257 diffs_seen += 1
257 cfp = stringio()
258 cfp = stringio()
258 for line in payload[:m.start(0)].splitlines():
259 for line in payload[:m.start(0)].splitlines():
259 if line.startswith('# HG changeset patch') and not hgpatch:
260 if line.startswith('# HG changeset patch') and not hgpatch:
260 ui.debug('patch generated by hg export\n')
261 ui.debug('patch generated by hg export\n')
261 hgpatch = True
262 hgpatch = True
262 hgpatchheader = True
263 hgpatchheader = True
263 # drop earlier commit message content
264 # drop earlier commit message content
264 cfp.seek(0)
265 cfp.seek(0)
265 cfp.truncate()
266 cfp.truncate()
266 subject = None
267 subject = None
267 elif hgpatchheader:
268 elif hgpatchheader:
268 if line.startswith('# User '):
269 if line.startswith('# User '):
269 data['user'] = line[7:]
270 data['user'] = line[7:]
270 ui.debug('From: %s\n' % data['user'])
271 ui.debug('From: %s\n' % data['user'])
271 elif line.startswith("# Parent "):
272 elif line.startswith("# Parent "):
272 parents.append(line[9:].lstrip())
273 parents.append(line[9:].lstrip())
273 elif line.startswith("# "):
274 elif line.startswith("# "):
274 for header, key in patchheadermap:
275 for header, key in patchheadermap:
275 prefix = '# %s ' % header
276 prefix = '# %s ' % header
276 if line.startswith(prefix):
277 if line.startswith(prefix):
277 data[key] = line[len(prefix):]
278 data[key] = line[len(prefix):]
278 else:
279 else:
279 hgpatchheader = False
280 hgpatchheader = False
280 elif line == '---':
281 elif line == '---':
281 ignoretext = True
282 ignoretext = True
282 if not hgpatchheader and not ignoretext:
283 if not hgpatchheader and not ignoretext:
283 cfp.write(line)
284 cfp.write(line)
284 cfp.write('\n')
285 cfp.write('\n')
285 message = cfp.getvalue()
286 message = cfp.getvalue()
286 if tmpfp:
287 if tmpfp:
287 tmpfp.write(payload)
288 tmpfp.write(payload)
288 if not payload.endswith('\n'):
289 if not payload.endswith('\n'):
289 tmpfp.write('\n')
290 tmpfp.write('\n')
290 elif not diffs_seen and message and content_type == 'text/plain':
291 elif not diffs_seen and message and content_type == 'text/plain':
291 message += '\n' + payload
292 message += '\n' + payload
292 except: # re-raises
293 except: # re-raises
293 tmpfp.close()
294 tmpfp.close()
294 os.unlink(tmpname)
295 os.unlink(tmpname)
295 raise
296 raise
296
297
297 if subject and not message.startswith(subject):
298 if subject and not message.startswith(subject):
298 message = '%s\n%s' % (subject, message)
299 message = '%s\n%s' % (subject, message)
299 data['message'] = message
300 data['message'] = message
300 tmpfp.close()
301 tmpfp.close()
301 if parents:
302 if parents:
302 data['p1'] = parents.pop(0)
303 data['p1'] = parents.pop(0)
303 if parents:
304 if parents:
304 data['p2'] = parents.pop(0)
305 data['p2'] = parents.pop(0)
305
306
306 if diffs_seen:
307 if diffs_seen:
307 data['filename'] = tmpname
308 data['filename'] = tmpname
308 else:
309 else:
309 os.unlink(tmpname)
310 os.unlink(tmpname)
310 return data
311 return data
311
312
class patchmeta(object):
    """Metadata describing a single patched file.

    Attributes:
      op      -- the performed operation: one of 'ADD', 'DELETE',
                 'RENAME', 'MODIFY' or 'COPY'
      path    -- path of the patched file
      oldpath -- origin file for 'COPY'/'RENAME' operations, else None
      mode    -- (islink, isexec) pair when the file mode changed,
                 otherwise None; 'islink' is truthy for symlinks and
                 'isexec' is truthy for executable files
      binary  -- True when the hunk carries binary data
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        # Keep only the symlink and owner-exec bits of a numeric mode;
        # a non-zero component means the flag is set.
        self.mode = (mode & 0o20000, mode & 0o100)

    def copy(self):
        # Shallow duplicate carrying over every attribute.
        dup = patchmeta(self.path)
        dup.oldpath = self.oldpath
        dup.mode = self.mode
        dup.op = self.op
        dup.binary = self.binary
        return dup

    def _ispatchinga(self, afile):
        # The 'a/' side names the pre-image; /dev/null means creation.
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        # The 'b/' side names the post-image; /dev/null means deletion.
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        """Return True if this metadata matches the given a/b diff paths."""
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)
357
358
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Scan for "diff --git" headers and collect the extended header
    # lines (rename/copy/mode/binary markers) that follow each one.
    results = []
    current = None
    for rawline in lr:
        rawline = rawline.rstrip(' \r\n')
        if rawline.startswith('diff --git a/'):
            m = gitre.match(rawline)
            if m:
                # A new file section begins; flush the previous one.
                if current:
                    results.append(current)
                current = patchmeta(m.group(2))
            continue
        if not current:
            continue
        if rawline.startswith('--- '):
            # Start of the unified diff body ends the extended headers.
            results.append(current)
            current = None
        elif rawline.startswith('rename from '):
            current.op = 'RENAME'
            current.oldpath = rawline[12:]
        elif rawline.startswith('rename to '):
            current.path = rawline[10:]
        elif rawline.startswith('copy from '):
            current.op = 'COPY'
            current.oldpath = rawline[10:]
        elif rawline.startswith('copy to '):
            current.path = rawline[8:]
        elif rawline.startswith('deleted file'):
            current.op = 'DELETE'
        elif rawline.startswith('new file mode '):
            current.op = 'ADD'
            # Trailing six octal digits encode the file mode.
            current.setmode(int(rawline[-6:], 8))
        elif rawline.startswith('new mode '):
            current.setmode(int(rawline[-6:], 8))
        elif rawline.startswith('GIT binary patch'):
            current.binary = True
    # A section with no diff body (e.g. pure rename) is still pending.
    if current:
        results.append(current)

    return results
401
402
class linereader(object):
    """File-like wrapper allowing lines to be pushed back onto the stream."""
    def __init__(self, fp):
        self.fp = fp
        # Pushed-back lines, consumed FIFO before reading from fp again.
        self.buf = []

    def push(self, line):
        # None is ignored so callers can push an "absent" line blindly.
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if not self.buf:
            return self.fp.readline()
        return self.buf.pop(0)

    def __iter__(self):
        # Iterate until readline() yields the empty string (EOF).
        return iter(self.readline, '')
421
422
class abstractbackend(object):
    """Interface for the destinations a patch can be applied to.

    Concrete subclasses decide where file contents live (filesystem,
    repository context, in-memory store) and override the accessors.
    """
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. total is the number of hunks
        which failed to apply and total the total number of hunks for this
        files.
        """
        # Intentionally a no-op by default: backends without a place to
        # store rejects may simply drop them.

    def exists(self, fname):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError
455
456
class fsbackend(abstractbackend):
    """Backend reading and writing files directly beneath a base directory."""
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = vfsmod.vfs(basedir)

    def getfile(self, fname):
        if self.opener.islink(fname):
            # For symlinks the "data" is the link target itself.
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
        except OSError as err:
            # A missing file is simply not executable; anything else
            # is a real error.
            if err.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        # Missing/deleted file.
        return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            # Content unchanged: only the flags need adjusting.
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
        if isexec:
            self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        # Rejects live next to the target, with a ".rej" suffix.
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        rejfp = self.opener(fname, 'w')
        rejfp.writelines(lines)
        rejfp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)
504
505
class workingbackend(fsbackend):
    """Backend applying a patch to a repository's working directory,
    tracking changed/removed/copied files so the dirstate can be
    updated when the patch is done."""
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        # Similarity threshold forwarded to rename detection at close().
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        # Refuse to touch files that exist on disk but are untracked.
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        wctx = self.repo[None]
        touched = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate: it was probably marked added then
                    # deleted, so keep it away from marktouched().
                    touched.discard(f)
        if touched:
            scmutil.marktouched(self.repo, touched, self.similarity)
        return sorted(self.changed)
548
549
class filestore(object):
    """Store of (data, mode, copysource) triples keyed by file name.

    Contents are held in memory until 'maxsize' bytes are reached, then
    spilled to files in a temporary directory. A negative maxsize keeps
    everything in memory; the default cap is 4 MiB.
    """
    def __init__(self, maxsize=None):
        self.opener = None   # lazily-created vfs over the spill directory
        self.files = {}      # fname -> (spill name, mode, copied)
        self.created = 0     # counter used to name spilled files
        self.maxsize = 4 * (2 ** 20) if maxsize is None else maxsize
        self.size = 0        # bytes currently held in memory
        self.data = {}       # fname -> (data, mode, copied)

    def setfile(self, fname, data, mode, copied=None):
        fits = self.maxsize < 0 or (len(data) + self.size) <= self.maxsize
        if fits:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
            return
        if self.opener is None:
            # First spill: create the temporary backing directory.
            self.opener = vfsmod.vfs(tempfile.mkdtemp(prefix='hg-patch-'))
        # Avoid filename issues with these simple names
        spillname = str(self.created)
        self.opener.write(spillname, data)
        self.created += 1
        self.files[fname] = (spillname, mode, copied)

    def getfile(self, fname):
        try:
            return self.data[fname]
        except KeyError:
            pass
        if self.opener and fname in self.files:
            spillname, mode, copied = self.files[fname]
            return self.opener.read(spillname), mode, copied
        # Unknown file.
        return None, None, None

    def close(self):
        # In-memory data vanishes with the object; only the spill
        # directory needs explicit cleanup.
        if self.opener:
            shutil.rmtree(self.opener.base)
585
586
class repobackend(abstractbackend):
    """Backend buffering patched content in a filestore, reading the
    pristine content from a repository changectx."""
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx       # context the patch is applied against
        self.store = store   # filestore receiving the patched content
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        # Files absent from the context cannot be patched.
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        fileflags = fctx.flags()
        return fctx.data(), ('l' in fileflags, 'x' in fileflags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            # Mode-only change: carry the existing content forward.
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        # Everything either rewritten or deleted was touched.
        return self.changed.union(self.removed)
627
628
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# Raw strings: '\d', '\+' and '\*' are regex escapes, and non-raw
# literals would emit invalid-escape-sequence warnings on Python 3.6+.
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# Supported line-ending normalization modes for patching.
eolmodes = ['strict', 'crlf', 'lf', 'auto']
632
633
633 class patchfile(object):
634 class patchfile(object):
634 def __init__(self, ui, gp, backend, store, eolmode='strict'):
635 def __init__(self, ui, gp, backend, store, eolmode='strict'):
635 self.fname = gp.path
636 self.fname = gp.path
636 self.eolmode = eolmode
637 self.eolmode = eolmode
637 self.eol = None
638 self.eol = None
638 self.backend = backend
639 self.backend = backend
639 self.ui = ui
640 self.ui = ui
640 self.lines = []
641 self.lines = []
641 self.exists = False
642 self.exists = False
642 self.missing = True
643 self.missing = True
643 self.mode = gp.mode
644 self.mode = gp.mode
644 self.copysource = gp.oldpath
645 self.copysource = gp.oldpath
645 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
646 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
646 self.remove = gp.op == 'DELETE'
647 self.remove = gp.op == 'DELETE'
647 if self.copysource is None:
648 if self.copysource is None:
648 data, mode = backend.getfile(self.fname)
649 data, mode = backend.getfile(self.fname)
649 else:
650 else:
650 data, mode = store.getfile(self.copysource)[:2]
651 data, mode = store.getfile(self.copysource)[:2]
651 if data is not None:
652 if data is not None:
652 self.exists = self.copysource is None or backend.exists(self.fname)
653 self.exists = self.copysource is None or backend.exists(self.fname)
653 self.missing = False
654 self.missing = False
654 if data:
655 if data:
655 self.lines = mdiff.splitnewlines(data)
656 self.lines = mdiff.splitnewlines(data)
656 if self.mode is None:
657 if self.mode is None:
657 self.mode = mode
658 self.mode = mode
658 if self.lines:
659 if self.lines:
659 # Normalize line endings
660 # Normalize line endings
660 if self.lines[0].endswith('\r\n'):
661 if self.lines[0].endswith('\r\n'):
661 self.eol = '\r\n'
662 self.eol = '\r\n'
662 elif self.lines[0].endswith('\n'):
663 elif self.lines[0].endswith('\n'):
663 self.eol = '\n'
664 self.eol = '\n'
664 if eolmode != 'strict':
665 if eolmode != 'strict':
665 nlines = []
666 nlines = []
666 for l in self.lines:
667 for l in self.lines:
667 if l.endswith('\r\n'):
668 if l.endswith('\r\n'):
668 l = l[:-2] + '\n'
669 l = l[:-2] + '\n'
669 nlines.append(l)
670 nlines.append(l)
670 self.lines = nlines
671 self.lines = nlines
671 else:
672 else:
672 if self.create:
673 if self.create:
673 self.missing = False
674 self.missing = False
674 if self.mode is None:
675 if self.mode is None:
675 self.mode = (False, False)
676 self.mode = (False, False)
676 if self.missing:
677 if self.missing:
677 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
678 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
678 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
679 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
679 "current directory)\n"))
680 "current directory)\n"))
680
681
681 self.hash = {}
682 self.hash = {}
682 self.dirty = 0
683 self.dirty = 0
683 self.offset = 0
684 self.offset = 0
684 self.skew = 0
685 self.skew = 0
685 self.rej = []
686 self.rej = []
686 self.fileprinted = False
687 self.fileprinted = False
687 self.printfile(False)
688 self.printfile(False)
688 self.hunks = 0
689 self.hunks = 0
689
690
690 def writelines(self, fname, lines, mode):
691 def writelines(self, fname, lines, mode):
691 if self.eolmode == 'auto':
692 if self.eolmode == 'auto':
692 eol = self.eol
693 eol = self.eol
693 elif self.eolmode == 'crlf':
694 elif self.eolmode == 'crlf':
694 eol = '\r\n'
695 eol = '\r\n'
695 else:
696 else:
696 eol = '\n'
697 eol = '\n'
697
698
698 if self.eolmode != 'strict' and eol and eol != '\n':
699 if self.eolmode != 'strict' and eol and eol != '\n':
699 rawlines = []
700 rawlines = []
700 for l in lines:
701 for l in lines:
701 if l and l[-1] == '\n':
702 if l and l[-1] == '\n':
702 l = l[:-1] + eol
703 l = l[:-1] + eol
703 rawlines.append(l)
704 rawlines.append(l)
704 lines = rawlines
705 lines = rawlines
705
706
706 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
707 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
707
708
708 def printfile(self, warn):
709 def printfile(self, warn):
709 if self.fileprinted:
710 if self.fileprinted:
710 return
711 return
711 if warn or self.ui.verbose:
712 if warn or self.ui.verbose:
712 self.fileprinted = True
713 self.fileprinted = True
713 s = _("patching file %s\n") % self.fname
714 s = _("patching file %s\n") % self.fname
714 if warn:
715 if warn:
715 self.ui.warn(s)
716 self.ui.warn(s)
716 else:
717 else:
717 self.ui.note(s)
718 self.ui.note(s)
718
719
719
720
720 def findlines(self, l, linenum):
721 def findlines(self, l, linenum):
721 # looks through the hash and finds candidate lines. The
722 # looks through the hash and finds candidate lines. The
722 # result is a list of line numbers sorted based on distance
723 # result is a list of line numbers sorted based on distance
723 # from linenum
724 # from linenum
724
725
725 cand = self.hash.get(l, [])
726 cand = self.hash.get(l, [])
726 if len(cand) > 1:
727 if len(cand) > 1:
727 # resort our list of potentials forward then back.
728 # resort our list of potentials forward then back.
728 cand.sort(key=lambda x: abs(x - linenum))
729 cand.sort(key=lambda x: abs(x - linenum))
729 return cand
730 return cand
730
731
731 def write_rej(self):
732 def write_rej(self):
732 # our rejects are a little different from patch(1). This always
733 # our rejects are a little different from patch(1). This always
733 # creates rejects in the same form as the original patch. A file
734 # creates rejects in the same form as the original patch. A file
734 # header is inserted so that you can run the reject through patch again
735 # header is inserted so that you can run the reject through patch again
735 # without having to type the filename.
736 # without having to type the filename.
736 if not self.rej:
737 if not self.rej:
737 return
738 return
738 base = os.path.basename(self.fname)
739 base = os.path.basename(self.fname)
739 lines = ["--- %s\n+++ %s\n" % (base, base)]
740 lines = ["--- %s\n+++ %s\n" % (base, base)]
740 for x in self.rej:
741 for x in self.rej:
741 for l in x.hunk:
742 for l in x.hunk:
742 lines.append(l)
743 lines.append(l)
743 if l[-1:] != '\n':
744 if l[-1:] != '\n':
744 lines.append("\n\ No newline at end of file\n")
745 lines.append("\n\ No newline at end of file\n")
745 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
746 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
746
747
747 def apply(self, h):
748 def apply(self, h):
748 if not h.complete():
749 if not h.complete():
749 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
750 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
750 (h.number, h.desc, len(h.a), h.lena, len(h.b),
751 (h.number, h.desc, len(h.a), h.lena, len(h.b),
751 h.lenb))
752 h.lenb))
752
753
753 self.hunks += 1
754 self.hunks += 1
754
755
755 if self.missing:
756 if self.missing:
756 self.rej.append(h)
757 self.rej.append(h)
757 return -1
758 return -1
758
759
759 if self.exists and self.create:
760 if self.exists and self.create:
760 if self.copysource:
761 if self.copysource:
761 self.ui.warn(_("cannot create %s: destination already "
762 self.ui.warn(_("cannot create %s: destination already "
762 "exists\n") % self.fname)
763 "exists\n") % self.fname)
763 else:
764 else:
764 self.ui.warn(_("file %s already exists\n") % self.fname)
765 self.ui.warn(_("file %s already exists\n") % self.fname)
765 self.rej.append(h)
766 self.rej.append(h)
766 return -1
767 return -1
767
768
768 if isinstance(h, binhunk):
769 if isinstance(h, binhunk):
769 if self.remove:
770 if self.remove:
770 self.backend.unlink(self.fname)
771 self.backend.unlink(self.fname)
771 else:
772 else:
772 l = h.new(self.lines)
773 l = h.new(self.lines)
773 self.lines[:] = l
774 self.lines[:] = l
774 self.offset += len(l)
775 self.offset += len(l)
775 self.dirty = True
776 self.dirty = True
776 return 0
777 return 0
777
778
778 horig = h
779 horig = h
779 if (self.eolmode in ('crlf', 'lf')
780 if (self.eolmode in ('crlf', 'lf')
780 or self.eolmode == 'auto' and self.eol):
781 or self.eolmode == 'auto' and self.eol):
781 # If new eols are going to be normalized, then normalize
782 # If new eols are going to be normalized, then normalize
782 # hunk data before patching. Otherwise, preserve input
783 # hunk data before patching. Otherwise, preserve input
783 # line-endings.
784 # line-endings.
784 h = h.getnormalized()
785 h = h.getnormalized()
785
786
786 # fast case first, no offsets, no fuzz
787 # fast case first, no offsets, no fuzz
787 old, oldstart, new, newstart = h.fuzzit(0, False)
788 old, oldstart, new, newstart = h.fuzzit(0, False)
788 oldstart += self.offset
789 oldstart += self.offset
789 orig_start = oldstart
790 orig_start = oldstart
790 # if there's skew we want to emit the "(offset %d lines)" even
791 # if there's skew we want to emit the "(offset %d lines)" even
791 # when the hunk cleanly applies at start + skew, so skip the
792 # when the hunk cleanly applies at start + skew, so skip the
792 # fast case code
793 # fast case code
793 if (self.skew == 0 and
794 if (self.skew == 0 and
794 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
795 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
795 if self.remove:
796 if self.remove:
796 self.backend.unlink(self.fname)
797 self.backend.unlink(self.fname)
797 else:
798 else:
798 self.lines[oldstart:oldstart + len(old)] = new
799 self.lines[oldstart:oldstart + len(old)] = new
799 self.offset += len(new) - len(old)
800 self.offset += len(new) - len(old)
800 self.dirty = True
801 self.dirty = True
801 return 0
802 return 0
802
803
803 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
804 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
804 self.hash = {}
805 self.hash = {}
805 for x, s in enumerate(self.lines):
806 for x, s in enumerate(self.lines):
806 self.hash.setdefault(s, []).append(x)
807 self.hash.setdefault(s, []).append(x)
807
808
808 for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
809 for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
809 for toponly in [True, False]:
810 for toponly in [True, False]:
810 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
811 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
811 oldstart = oldstart + self.offset + self.skew
812 oldstart = oldstart + self.offset + self.skew
812 oldstart = min(oldstart, len(self.lines))
813 oldstart = min(oldstart, len(self.lines))
813 if old:
814 if old:
814 cand = self.findlines(old[0][1:], oldstart)
815 cand = self.findlines(old[0][1:], oldstart)
815 else:
816 else:
816 # Only adding lines with no or fuzzed context, just
817 # Only adding lines with no or fuzzed context, just
817 # take the skew in account
818 # take the skew in account
818 cand = [oldstart]
819 cand = [oldstart]
819
820
820 for l in cand:
821 for l in cand:
821 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
822 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
822 self.lines[l : l + len(old)] = new
823 self.lines[l : l + len(old)] = new
823 self.offset += len(new) - len(old)
824 self.offset += len(new) - len(old)
824 self.skew = l - orig_start
825 self.skew = l - orig_start
825 self.dirty = True
826 self.dirty = True
826 offset = l - orig_start - fuzzlen
827 offset = l - orig_start - fuzzlen
827 if fuzzlen:
828 if fuzzlen:
828 msg = _("Hunk #%d succeeded at %d "
829 msg = _("Hunk #%d succeeded at %d "
829 "with fuzz %d "
830 "with fuzz %d "
830 "(offset %d lines).\n")
831 "(offset %d lines).\n")
831 self.printfile(True)
832 self.printfile(True)
832 self.ui.warn(msg %
833 self.ui.warn(msg %
833 (h.number, l + 1, fuzzlen, offset))
834 (h.number, l + 1, fuzzlen, offset))
834 else:
835 else:
835 msg = _("Hunk #%d succeeded at %d "
836 msg = _("Hunk #%d succeeded at %d "
836 "(offset %d lines).\n")
837 "(offset %d lines).\n")
837 self.ui.note(msg % (h.number, l + 1, offset))
838 self.ui.note(msg % (h.number, l + 1, offset))
838 return fuzzlen
839 return fuzzlen
839 self.printfile(True)
840 self.printfile(True)
840 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
841 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
841 self.rej.append(horig)
842 self.rej.append(horig)
842 return -1
843 return -1
843
844
844 def close(self):
845 def close(self):
845 if self.dirty:
846 if self.dirty:
846 self.writelines(self.fname, self.lines, self.mode)
847 self.writelines(self.fname, self.lines, self.mode)
847 self.write_rej()
848 self.write_rej()
848 return len(self.rej)
849 return len(self.rej)
849
850
850 class header(object):
851 class header(object):
851 """patch header
852 """patch header
852 """
853 """
853 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
854 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
854 diff_re = re.compile('diff -r .* (.*)$')
855 diff_re = re.compile('diff -r .* (.*)$')
855 allhunks_re = re.compile('(?:index|deleted file) ')
856 allhunks_re = re.compile('(?:index|deleted file) ')
856 pretty_re = re.compile('(?:new file|deleted file) ')
857 pretty_re = re.compile('(?:new file|deleted file) ')
857 special_re = re.compile('(?:index|deleted|copy|rename) ')
858 special_re = re.compile('(?:index|deleted|copy|rename) ')
858 newfile_re = re.compile('(?:new file)')
859 newfile_re = re.compile('(?:new file)')
859
860
860 def __init__(self, header):
861 def __init__(self, header):
861 self.header = header
862 self.header = header
862 self.hunks = []
863 self.hunks = []
863
864
864 def binary(self):
865 def binary(self):
865 return any(h.startswith('index ') for h in self.header)
866 return any(h.startswith('index ') for h in self.header)
866
867
867 def pretty(self, fp):
868 def pretty(self, fp):
868 for h in self.header:
869 for h in self.header:
869 if h.startswith('index '):
870 if h.startswith('index '):
870 fp.write(_('this modifies a binary file (all or nothing)\n'))
871 fp.write(_('this modifies a binary file (all or nothing)\n'))
871 break
872 break
872 if self.pretty_re.match(h):
873 if self.pretty_re.match(h):
873 fp.write(h)
874 fp.write(h)
874 if self.binary():
875 if self.binary():
875 fp.write(_('this is a binary file\n'))
876 fp.write(_('this is a binary file\n'))
876 break
877 break
877 if h.startswith('---'):
878 if h.startswith('---'):
878 fp.write(_('%d hunks, %d lines changed\n') %
879 fp.write(_('%d hunks, %d lines changed\n') %
879 (len(self.hunks),
880 (len(self.hunks),
880 sum([max(h.added, h.removed) for h in self.hunks])))
881 sum([max(h.added, h.removed) for h in self.hunks])))
881 break
882 break
882 fp.write(h)
883 fp.write(h)
883
884
884 def write(self, fp):
885 def write(self, fp):
885 fp.write(''.join(self.header))
886 fp.write(''.join(self.header))
886
887
887 def allhunks(self):
888 def allhunks(self):
888 return any(self.allhunks_re.match(h) for h in self.header)
889 return any(self.allhunks_re.match(h) for h in self.header)
889
890
890 def files(self):
891 def files(self):
891 match = self.diffgit_re.match(self.header[0])
892 match = self.diffgit_re.match(self.header[0])
892 if match:
893 if match:
893 fromfile, tofile = match.groups()
894 fromfile, tofile = match.groups()
894 if fromfile == tofile:
895 if fromfile == tofile:
895 return [fromfile]
896 return [fromfile]
896 return [fromfile, tofile]
897 return [fromfile, tofile]
897 else:
898 else:
898 return self.diff_re.match(self.header[0]).groups()
899 return self.diff_re.match(self.header[0]).groups()
899
900
900 def filename(self):
901 def filename(self):
901 return self.files()[-1]
902 return self.files()[-1]
902
903
903 def __repr__(self):
904 def __repr__(self):
904 return '<header %s>' % (' '.join(map(repr, self.files())))
905 return '<header %s>' % (' '.join(map(repr, self.files())))
905
906
906 def isnewfile(self):
907 def isnewfile(self):
907 return any(self.newfile_re.match(h) for h in self.header)
908 return any(self.newfile_re.match(h) for h in self.header)
908
909
909 def special(self):
910 def special(self):
910 # Special files are shown only at the header level and not at the hunk
911 # Special files are shown only at the header level and not at the hunk
911 # level for example a file that has been deleted is a special file.
912 # level for example a file that has been deleted is a special file.
912 # The user cannot change the content of the operation, in the case of
913 # The user cannot change the content of the operation, in the case of
913 # the deleted file he has to take the deletion or not take it, he
914 # the deleted file he has to take the deletion or not take it, he
914 # cannot take some of it.
915 # cannot take some of it.
915 # Newly added files are special if they are empty, they are not special
916 # Newly added files are special if they are empty, they are not special
916 # if they have some content as we want to be able to change it
917 # if they have some content as we want to be able to change it
917 nocontent = len(self.header) == 2
918 nocontent = len(self.header) == 2
918 emptynewfile = self.isnewfile() and nocontent
919 emptynewfile = self.isnewfile() and nocontent
919 return emptynewfile or \
920 return emptynewfile or \
920 any(self.special_re.match(h) for h in self.header)
921 any(self.special_re.match(h) for h in self.header)
921
922
922 class recordhunk(object):
923 class recordhunk(object):
923 """patch hunk
924 """patch hunk
924
925
925 XXX shouldn't we merge this with the other hunk class?
926 XXX shouldn't we merge this with the other hunk class?
926 """
927 """
927
928
928 def __init__(self, header, fromline, toline, proc, before, hunk, after,
929 def __init__(self, header, fromline, toline, proc, before, hunk, after,
929 maxcontext=None):
930 maxcontext=None):
930 def trimcontext(lines, reverse=False):
931 def trimcontext(lines, reverse=False):
931 if maxcontext is not None:
932 if maxcontext is not None:
932 delta = len(lines) - maxcontext
933 delta = len(lines) - maxcontext
933 if delta > 0:
934 if delta > 0:
934 if reverse:
935 if reverse:
935 return delta, lines[delta:]
936 return delta, lines[delta:]
936 else:
937 else:
937 return delta, lines[:maxcontext]
938 return delta, lines[:maxcontext]
938 return 0, lines
939 return 0, lines
939
940
940 self.header = header
941 self.header = header
941 trimedbefore, self.before = trimcontext(before, True)
942 trimedbefore, self.before = trimcontext(before, True)
942 self.fromline = fromline + trimedbefore
943 self.fromline = fromline + trimedbefore
943 self.toline = toline + trimedbefore
944 self.toline = toline + trimedbefore
944 _trimedafter, self.after = trimcontext(after, False)
945 _trimedafter, self.after = trimcontext(after, False)
945 self.proc = proc
946 self.proc = proc
946 self.hunk = hunk
947 self.hunk = hunk
947 self.added, self.removed = self.countchanges(self.hunk)
948 self.added, self.removed = self.countchanges(self.hunk)
948
949
949 def __eq__(self, v):
950 def __eq__(self, v):
950 if not isinstance(v, recordhunk):
951 if not isinstance(v, recordhunk):
951 return False
952 return False
952
953
953 return ((v.hunk == self.hunk) and
954 return ((v.hunk == self.hunk) and
954 (v.proc == self.proc) and
955 (v.proc == self.proc) and
955 (self.fromline == v.fromline) and
956 (self.fromline == v.fromline) and
956 (self.header.files() == v.header.files()))
957 (self.header.files() == v.header.files()))
957
958
958 def __hash__(self):
959 def __hash__(self):
959 return hash((tuple(self.hunk),
960 return hash((tuple(self.hunk),
960 tuple(self.header.files()),
961 tuple(self.header.files()),
961 self.fromline,
962 self.fromline,
962 self.proc))
963 self.proc))
963
964
964 def countchanges(self, hunk):
965 def countchanges(self, hunk):
965 """hunk -> (n+,n-)"""
966 """hunk -> (n+,n-)"""
966 add = len([h for h in hunk if h.startswith('+')])
967 add = len([h for h in hunk if h.startswith('+')])
967 rem = len([h for h in hunk if h.startswith('-')])
968 rem = len([h for h in hunk if h.startswith('-')])
968 return add, rem
969 return add, rem
969
970
970 def reversehunk(self):
971 def reversehunk(self):
971 """return another recordhunk which is the reverse of the hunk
972 """return another recordhunk which is the reverse of the hunk
972
973
973 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
974 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
974 that, swap fromline/toline and +/- signs while keep other things
975 that, swap fromline/toline and +/- signs while keep other things
975 unchanged.
976 unchanged.
976 """
977 """
977 m = {'+': '-', '-': '+', '\\': '\\'}
978 m = {'+': '-', '-': '+', '\\': '\\'}
978 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
979 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
979 return recordhunk(self.header, self.toline, self.fromline, self.proc,
980 return recordhunk(self.header, self.toline, self.fromline, self.proc,
980 self.before, hunk, self.after)
981 self.before, hunk, self.after)
981
982
982 def write(self, fp):
983 def write(self, fp):
983 delta = len(self.before) + len(self.after)
984 delta = len(self.before) + len(self.after)
984 if self.after and self.after[-1] == '\\ No newline at end of file\n':
985 if self.after and self.after[-1] == '\\ No newline at end of file\n':
985 delta -= 1
986 delta -= 1
986 fromlen = delta + self.removed
987 fromlen = delta + self.removed
987 tolen = delta + self.added
988 tolen = delta + self.added
988 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
989 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
989 (self.fromline, fromlen, self.toline, tolen,
990 (self.fromline, fromlen, self.toline, tolen,
990 self.proc and (' ' + self.proc)))
991 self.proc and (' ' + self.proc)))
991 fp.write(''.join(self.before + self.hunk + self.after))
992 fp.write(''.join(self.before + self.hunk + self.after))
992
993
993 pretty = write
994 pretty = write
994
995
995 def filename(self):
996 def filename(self):
996 return self.header.filename()
997 return self.header.filename()
997
998
998 def __repr__(self):
999 def __repr__(self):
999 return '<hunk %r@%d>' % (self.filename(), self.fromline)
1000 return '<hunk %r@%d>' % (self.filename(), self.fromline)
1000
1001
1001 def getmessages():
1002 def getmessages():
1002 return {
1003 return {
1003 'multiple': {
1004 'multiple': {
1004 'apply': _("apply change %d/%d to '%s'?"),
1005 'apply': _("apply change %d/%d to '%s'?"),
1005 'discard': _("discard change %d/%d to '%s'?"),
1006 'discard': _("discard change %d/%d to '%s'?"),
1006 'record': _("record change %d/%d to '%s'?"),
1007 'record': _("record change %d/%d to '%s'?"),
1007 },
1008 },
1008 'single': {
1009 'single': {
1009 'apply': _("apply this change to '%s'?"),
1010 'apply': _("apply this change to '%s'?"),
1010 'discard': _("discard this change to '%s'?"),
1011 'discard': _("discard this change to '%s'?"),
1011 'record': _("record this change to '%s'?"),
1012 'record': _("record this change to '%s'?"),
1012 },
1013 },
1013 'help': {
1014 'help': {
1014 'apply': _('[Ynesfdaq?]'
1015 'apply': _('[Ynesfdaq?]'
1015 '$$ &Yes, apply this change'
1016 '$$ &Yes, apply this change'
1016 '$$ &No, skip this change'
1017 '$$ &No, skip this change'
1017 '$$ &Edit this change manually'
1018 '$$ &Edit this change manually'
1018 '$$ &Skip remaining changes to this file'
1019 '$$ &Skip remaining changes to this file'
1019 '$$ Apply remaining changes to this &file'
1020 '$$ Apply remaining changes to this &file'
1020 '$$ &Done, skip remaining changes and files'
1021 '$$ &Done, skip remaining changes and files'
1021 '$$ Apply &all changes to all remaining files'
1022 '$$ Apply &all changes to all remaining files'
1022 '$$ &Quit, applying no changes'
1023 '$$ &Quit, applying no changes'
1023 '$$ &? (display help)'),
1024 '$$ &? (display help)'),
1024 'discard': _('[Ynesfdaq?]'
1025 'discard': _('[Ynesfdaq?]'
1025 '$$ &Yes, discard this change'
1026 '$$ &Yes, discard this change'
1026 '$$ &No, skip this change'
1027 '$$ &No, skip this change'
1027 '$$ &Edit this change manually'
1028 '$$ &Edit this change manually'
1028 '$$ &Skip remaining changes to this file'
1029 '$$ &Skip remaining changes to this file'
1029 '$$ Discard remaining changes to this &file'
1030 '$$ Discard remaining changes to this &file'
1030 '$$ &Done, skip remaining changes and files'
1031 '$$ &Done, skip remaining changes and files'
1031 '$$ Discard &all changes to all remaining files'
1032 '$$ Discard &all changes to all remaining files'
1032 '$$ &Quit, discarding no changes'
1033 '$$ &Quit, discarding no changes'
1033 '$$ &? (display help)'),
1034 '$$ &? (display help)'),
1034 'record': _('[Ynesfdaq?]'
1035 'record': _('[Ynesfdaq?]'
1035 '$$ &Yes, record this change'
1036 '$$ &Yes, record this change'
1036 '$$ &No, skip this change'
1037 '$$ &No, skip this change'
1037 '$$ &Edit this change manually'
1038 '$$ &Edit this change manually'
1038 '$$ &Skip remaining changes to this file'
1039 '$$ &Skip remaining changes to this file'
1039 '$$ Record remaining changes to this &file'
1040 '$$ Record remaining changes to this &file'
1040 '$$ &Done, skip remaining changes and files'
1041 '$$ &Done, skip remaining changes and files'
1041 '$$ Record &all changes to all remaining files'
1042 '$$ Record &all changes to all remaining files'
1042 '$$ &Quit, recording no changes'
1043 '$$ &Quit, recording no changes'
1043 '$$ &? (display help)'),
1044 '$$ &? (display help)'),
1044 }
1045 }
1045 }
1046 }
1046
1047
1047 def filterpatch(ui, headers, operation=None):
1048 def filterpatch(ui, headers, operation=None):
1048 """Interactively filter patch chunks into applied-only chunks"""
1049 """Interactively filter patch chunks into applied-only chunks"""
1049 messages = getmessages()
1050 messages = getmessages()
1050
1051
1051 if operation is None:
1052 if operation is None:
1052 operation = 'record'
1053 operation = 'record'
1053
1054
1054 def prompt(skipfile, skipall, query, chunk):
1055 def prompt(skipfile, skipall, query, chunk):
1055 """prompt query, and process base inputs
1056 """prompt query, and process base inputs
1056
1057
1057 - y/n for the rest of file
1058 - y/n for the rest of file
1058 - y/n for the rest
1059 - y/n for the rest
1059 - ? (help)
1060 - ? (help)
1060 - q (quit)
1061 - q (quit)
1061
1062
1062 Return True/False and possibly updated skipfile and skipall.
1063 Return True/False and possibly updated skipfile and skipall.
1063 """
1064 """
1064 newpatches = None
1065 newpatches = None
1065 if skipall is not None:
1066 if skipall is not None:
1066 return skipall, skipfile, skipall, newpatches
1067 return skipall, skipfile, skipall, newpatches
1067 if skipfile is not None:
1068 if skipfile is not None:
1068 return skipfile, skipfile, skipall, newpatches
1069 return skipfile, skipfile, skipall, newpatches
1069 while True:
1070 while True:
1070 resps = messages['help'][operation]
1071 resps = messages['help'][operation]
1071 r = ui.promptchoice("%s %s" % (query, resps))
1072 r = ui.promptchoice("%s %s" % (query, resps))
1072 ui.write("\n")
1073 ui.write("\n")
1073 if r == 8: # ?
1074 if r == 8: # ?
1074 for c, t in ui.extractchoices(resps)[1]:
1075 for c, t in ui.extractchoices(resps)[1]:
1075 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1076 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1076 continue
1077 continue
1077 elif r == 0: # yes
1078 elif r == 0: # yes
1078 ret = True
1079 ret = True
1079 elif r == 1: # no
1080 elif r == 1: # no
1080 ret = False
1081 ret = False
1081 elif r == 2: # Edit patch
1082 elif r == 2: # Edit patch
1082 if chunk is None:
1083 if chunk is None:
1083 ui.write(_('cannot edit patch for whole file'))
1084 ui.write(_('cannot edit patch for whole file'))
1084 ui.write("\n")
1085 ui.write("\n")
1085 continue
1086 continue
1086 if chunk.header.binary():
1087 if chunk.header.binary():
1087 ui.write(_('cannot edit patch for binary file'))
1088 ui.write(_('cannot edit patch for binary file'))
1088 ui.write("\n")
1089 ui.write("\n")
1089 continue
1090 continue
1090 # Patch comment based on the Git one (based on comment at end of
1091 # Patch comment based on the Git one (based on comment at end of
1091 # https://mercurial-scm.org/wiki/RecordExtension)
1092 # https://mercurial-scm.org/wiki/RecordExtension)
1092 phelp = '---' + _("""
1093 phelp = '---' + _("""
1093 To remove '-' lines, make them ' ' lines (context).
1094 To remove '-' lines, make them ' ' lines (context).
1094 To remove '+' lines, delete them.
1095 To remove '+' lines, delete them.
1095 Lines starting with # will be removed from the patch.
1096 Lines starting with # will be removed from the patch.
1096
1097
1097 If the patch applies cleanly, the edited hunk will immediately be
1098 If the patch applies cleanly, the edited hunk will immediately be
1098 added to the record list. If it does not apply cleanly, a rejects
1099 added to the record list. If it does not apply cleanly, a rejects
1099 file will be generated: you can use that when you try again. If
1100 file will be generated: you can use that when you try again. If
1100 all lines of the hunk are removed, then the edit is aborted and
1101 all lines of the hunk are removed, then the edit is aborted and
1101 the hunk is left unchanged.
1102 the hunk is left unchanged.
1102 """)
1103 """)
1103 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1104 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1104 suffix=".diff", text=True)
1105 suffix=".diff", text=True)
1105 ncpatchfp = None
1106 ncpatchfp = None
1106 try:
1107 try:
1107 # Write the initial patch
1108 # Write the initial patch
1108 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1109 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1109 chunk.header.write(f)
1110 chunk.header.write(f)
1110 chunk.write(f)
1111 chunk.write(f)
1111 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1112 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1112 f.close()
1113 f.close()
1113 # Start the editor and wait for it to complete
1114 # Start the editor and wait for it to complete
1114 editor = ui.geteditor()
1115 editor = ui.geteditor()
1115 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1116 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1116 environ={'HGUSER': ui.username()},
1117 environ={'HGUSER': ui.username()},
1117 blockedtag='filterpatch')
1118 blockedtag='filterpatch')
1118 if ret != 0:
1119 if ret != 0:
1119 ui.warn(_("editor exited with exit code %d\n") % ret)
1120 ui.warn(_("editor exited with exit code %d\n") % ret)
1120 continue
1121 continue
1121 # Remove comment lines
1122 # Remove comment lines
1122 patchfp = open(patchfn)
1123 patchfp = open(patchfn)
1123 ncpatchfp = stringio()
1124 ncpatchfp = stringio()
1124 for line in util.iterfile(patchfp):
1125 for line in util.iterfile(patchfp):
1125 if not line.startswith('#'):
1126 if not line.startswith('#'):
1126 ncpatchfp.write(line)
1127 ncpatchfp.write(line)
1127 patchfp.close()
1128 patchfp.close()
1128 ncpatchfp.seek(0)
1129 ncpatchfp.seek(0)
1129 newpatches = parsepatch(ncpatchfp)
1130 newpatches = parsepatch(ncpatchfp)
1130 finally:
1131 finally:
1131 os.unlink(patchfn)
1132 os.unlink(patchfn)
1132 del ncpatchfp
1133 del ncpatchfp
1133 # Signal that the chunk shouldn't be applied as-is, but
1134 # Signal that the chunk shouldn't be applied as-is, but
1134 # provide the new patch to be used instead.
1135 # provide the new patch to be used instead.
1135 ret = False
1136 ret = False
1136 elif r == 3: # Skip
1137 elif r == 3: # Skip
1137 ret = skipfile = False
1138 ret = skipfile = False
1138 elif r == 4: # file (Record remaining)
1139 elif r == 4: # file (Record remaining)
1139 ret = skipfile = True
1140 ret = skipfile = True
1140 elif r == 5: # done, skip remaining
1141 elif r == 5: # done, skip remaining
1141 ret = skipall = False
1142 ret = skipall = False
1142 elif r == 6: # all
1143 elif r == 6: # all
1143 ret = skipall = True
1144 ret = skipall = True
1144 elif r == 7: # quit
1145 elif r == 7: # quit
1145 raise error.Abort(_('user quit'))
1146 raise error.Abort(_('user quit'))
1146 return ret, skipfile, skipall, newpatches
1147 return ret, skipfile, skipall, newpatches
1147
1148
1148 seen = set()
1149 seen = set()
1149 applied = {} # 'filename' -> [] of chunks
1150 applied = {} # 'filename' -> [] of chunks
1150 skipfile, skipall = None, None
1151 skipfile, skipall = None, None
1151 pos, total = 1, sum(len(h.hunks) for h in headers)
1152 pos, total = 1, sum(len(h.hunks) for h in headers)
1152 for h in headers:
1153 for h in headers:
1153 pos += len(h.hunks)
1154 pos += len(h.hunks)
1154 skipfile = None
1155 skipfile = None
1155 fixoffset = 0
1156 fixoffset = 0
1156 hdr = ''.join(h.header)
1157 hdr = ''.join(h.header)
1157 if hdr in seen:
1158 if hdr in seen:
1158 continue
1159 continue
1159 seen.add(hdr)
1160 seen.add(hdr)
1160 if skipall is None:
1161 if skipall is None:
1161 h.pretty(ui)
1162 h.pretty(ui)
1162 msg = (_('examine changes to %s?') %
1163 msg = (_('examine changes to %s?') %
1163 _(' and ').join("'%s'" % f for f in h.files()))
1164 _(' and ').join("'%s'" % f for f in h.files()))
1164 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1165 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1165 if not r:
1166 if not r:
1166 continue
1167 continue
1167 applied[h.filename()] = [h]
1168 applied[h.filename()] = [h]
1168 if h.allhunks():
1169 if h.allhunks():
1169 applied[h.filename()] += h.hunks
1170 applied[h.filename()] += h.hunks
1170 continue
1171 continue
1171 for i, chunk in enumerate(h.hunks):
1172 for i, chunk in enumerate(h.hunks):
1172 if skipfile is None and skipall is None:
1173 if skipfile is None and skipall is None:
1173 chunk.pretty(ui)
1174 chunk.pretty(ui)
1174 if total == 1:
1175 if total == 1:
1175 msg = messages['single'][operation] % chunk.filename()
1176 msg = messages['single'][operation] % chunk.filename()
1176 else:
1177 else:
1177 idx = pos - len(h.hunks) + i
1178 idx = pos - len(h.hunks) + i
1178 msg = messages['multiple'][operation] % (idx, total,
1179 msg = messages['multiple'][operation] % (idx, total,
1179 chunk.filename())
1180 chunk.filename())
1180 r, skipfile, skipall, newpatches = prompt(skipfile,
1181 r, skipfile, skipall, newpatches = prompt(skipfile,
1181 skipall, msg, chunk)
1182 skipall, msg, chunk)
1182 if r:
1183 if r:
1183 if fixoffset:
1184 if fixoffset:
1184 chunk = copy.copy(chunk)
1185 chunk = copy.copy(chunk)
1185 chunk.toline += fixoffset
1186 chunk.toline += fixoffset
1186 applied[chunk.filename()].append(chunk)
1187 applied[chunk.filename()].append(chunk)
1187 elif newpatches is not None:
1188 elif newpatches is not None:
1188 for newpatch in newpatches:
1189 for newpatch in newpatches:
1189 for newhunk in newpatch.hunks:
1190 for newhunk in newpatch.hunks:
1190 if fixoffset:
1191 if fixoffset:
1191 newhunk.toline += fixoffset
1192 newhunk.toline += fixoffset
1192 applied[newhunk.filename()].append(newhunk)
1193 applied[newhunk.filename()].append(newhunk)
1193 else:
1194 else:
1194 fixoffset += chunk.removed - chunk.added
1195 fixoffset += chunk.removed - chunk.added
1195 return (sum([h for h in applied.itervalues()
1196 return (sum([h for h in applied.itervalues()
1196 if h[0].special() or len(h) > 1], []), {})
1197 if h[0].special() or len(h) > 1], []), {})
1197 class hunk(object):
1198 class hunk(object):
1198 def __init__(self, desc, num, lr, context):
1199 def __init__(self, desc, num, lr, context):
1199 self.number = num
1200 self.number = num
1200 self.desc = desc
1201 self.desc = desc
1201 self.hunk = [desc]
1202 self.hunk = [desc]
1202 self.a = []
1203 self.a = []
1203 self.b = []
1204 self.b = []
1204 self.starta = self.lena = None
1205 self.starta = self.lena = None
1205 self.startb = self.lenb = None
1206 self.startb = self.lenb = None
1206 if lr is not None:
1207 if lr is not None:
1207 if context:
1208 if context:
1208 self.read_context_hunk(lr)
1209 self.read_context_hunk(lr)
1209 else:
1210 else:
1210 self.read_unified_hunk(lr)
1211 self.read_unified_hunk(lr)
1211
1212
1212 def getnormalized(self):
1213 def getnormalized(self):
1213 """Return a copy with line endings normalized to LF."""
1214 """Return a copy with line endings normalized to LF."""
1214
1215
1215 def normalize(lines):
1216 def normalize(lines):
1216 nlines = []
1217 nlines = []
1217 for line in lines:
1218 for line in lines:
1218 if line.endswith('\r\n'):
1219 if line.endswith('\r\n'):
1219 line = line[:-2] + '\n'
1220 line = line[:-2] + '\n'
1220 nlines.append(line)
1221 nlines.append(line)
1221 return nlines
1222 return nlines
1222
1223
1223 # Dummy object, it is rebuilt manually
1224 # Dummy object, it is rebuilt manually
1224 nh = hunk(self.desc, self.number, None, None)
1225 nh = hunk(self.desc, self.number, None, None)
1225 nh.number = self.number
1226 nh.number = self.number
1226 nh.desc = self.desc
1227 nh.desc = self.desc
1227 nh.hunk = self.hunk
1228 nh.hunk = self.hunk
1228 nh.a = normalize(self.a)
1229 nh.a = normalize(self.a)
1229 nh.b = normalize(self.b)
1230 nh.b = normalize(self.b)
1230 nh.starta = self.starta
1231 nh.starta = self.starta
1231 nh.startb = self.startb
1232 nh.startb = self.startb
1232 nh.lena = self.lena
1233 nh.lena = self.lena
1233 nh.lenb = self.lenb
1234 nh.lenb = self.lenb
1234 return nh
1235 return nh
1235
1236
1236 def read_unified_hunk(self, lr):
1237 def read_unified_hunk(self, lr):
1237 m = unidesc.match(self.desc)
1238 m = unidesc.match(self.desc)
1238 if not m:
1239 if not m:
1239 raise PatchError(_("bad hunk #%d") % self.number)
1240 raise PatchError(_("bad hunk #%d") % self.number)
1240 self.starta, self.lena, self.startb, self.lenb = m.groups()
1241 self.starta, self.lena, self.startb, self.lenb = m.groups()
1241 if self.lena is None:
1242 if self.lena is None:
1242 self.lena = 1
1243 self.lena = 1
1243 else:
1244 else:
1244 self.lena = int(self.lena)
1245 self.lena = int(self.lena)
1245 if self.lenb is None:
1246 if self.lenb is None:
1246 self.lenb = 1
1247 self.lenb = 1
1247 else:
1248 else:
1248 self.lenb = int(self.lenb)
1249 self.lenb = int(self.lenb)
1249 self.starta = int(self.starta)
1250 self.starta = int(self.starta)
1250 self.startb = int(self.startb)
1251 self.startb = int(self.startb)
1251 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1252 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1252 self.b)
1253 self.b)
1253 # if we hit eof before finishing out the hunk, the last line will
1254 # if we hit eof before finishing out the hunk, the last line will
1254 # be zero length. Lets try to fix it up.
1255 # be zero length. Lets try to fix it up.
1255 while len(self.hunk[-1]) == 0:
1256 while len(self.hunk[-1]) == 0:
1256 del self.hunk[-1]
1257 del self.hunk[-1]
1257 del self.a[-1]
1258 del self.a[-1]
1258 del self.b[-1]
1259 del self.b[-1]
1259 self.lena -= 1
1260 self.lena -= 1
1260 self.lenb -= 1
1261 self.lenb -= 1
1261 self._fixnewline(lr)
1262 self._fixnewline(lr)
1262
1263
1263 def read_context_hunk(self, lr):
1264 def read_context_hunk(self, lr):
1264 self.desc = lr.readline()
1265 self.desc = lr.readline()
1265 m = contextdesc.match(self.desc)
1266 m = contextdesc.match(self.desc)
1266 if not m:
1267 if not m:
1267 raise PatchError(_("bad hunk #%d") % self.number)
1268 raise PatchError(_("bad hunk #%d") % self.number)
1268 self.starta, aend = m.groups()
1269 self.starta, aend = m.groups()
1269 self.starta = int(self.starta)
1270 self.starta = int(self.starta)
1270 if aend is None:
1271 if aend is None:
1271 aend = self.starta
1272 aend = self.starta
1272 self.lena = int(aend) - self.starta
1273 self.lena = int(aend) - self.starta
1273 if self.starta:
1274 if self.starta:
1274 self.lena += 1
1275 self.lena += 1
1275 for x in xrange(self.lena):
1276 for x in xrange(self.lena):
1276 l = lr.readline()
1277 l = lr.readline()
1277 if l.startswith('---'):
1278 if l.startswith('---'):
1278 # lines addition, old block is empty
1279 # lines addition, old block is empty
1279 lr.push(l)
1280 lr.push(l)
1280 break
1281 break
1281 s = l[2:]
1282 s = l[2:]
1282 if l.startswith('- ') or l.startswith('! '):
1283 if l.startswith('- ') or l.startswith('! '):
1283 u = '-' + s
1284 u = '-' + s
1284 elif l.startswith(' '):
1285 elif l.startswith(' '):
1285 u = ' ' + s
1286 u = ' ' + s
1286 else:
1287 else:
1287 raise PatchError(_("bad hunk #%d old text line %d") %
1288 raise PatchError(_("bad hunk #%d old text line %d") %
1288 (self.number, x))
1289 (self.number, x))
1289 self.a.append(u)
1290 self.a.append(u)
1290 self.hunk.append(u)
1291 self.hunk.append(u)
1291
1292
1292 l = lr.readline()
1293 l = lr.readline()
1293 if l.startswith('\ '):
1294 if l.startswith('\ '):
1294 s = self.a[-1][:-1]
1295 s = self.a[-1][:-1]
1295 self.a[-1] = s
1296 self.a[-1] = s
1296 self.hunk[-1] = s
1297 self.hunk[-1] = s
1297 l = lr.readline()
1298 l = lr.readline()
1298 m = contextdesc.match(l)
1299 m = contextdesc.match(l)
1299 if not m:
1300 if not m:
1300 raise PatchError(_("bad hunk #%d") % self.number)
1301 raise PatchError(_("bad hunk #%d") % self.number)
1301 self.startb, bend = m.groups()
1302 self.startb, bend = m.groups()
1302 self.startb = int(self.startb)
1303 self.startb = int(self.startb)
1303 if bend is None:
1304 if bend is None:
1304 bend = self.startb
1305 bend = self.startb
1305 self.lenb = int(bend) - self.startb
1306 self.lenb = int(bend) - self.startb
1306 if self.startb:
1307 if self.startb:
1307 self.lenb += 1
1308 self.lenb += 1
1308 hunki = 1
1309 hunki = 1
1309 for x in xrange(self.lenb):
1310 for x in xrange(self.lenb):
1310 l = lr.readline()
1311 l = lr.readline()
1311 if l.startswith('\ '):
1312 if l.startswith('\ '):
1312 # XXX: the only way to hit this is with an invalid line range.
1313 # XXX: the only way to hit this is with an invalid line range.
1313 # The no-eol marker is not counted in the line range, but I
1314 # The no-eol marker is not counted in the line range, but I
1314 # guess there are diff(1) out there which behave differently.
1315 # guess there are diff(1) out there which behave differently.
1315 s = self.b[-1][:-1]
1316 s = self.b[-1][:-1]
1316 self.b[-1] = s
1317 self.b[-1] = s
1317 self.hunk[hunki - 1] = s
1318 self.hunk[hunki - 1] = s
1318 continue
1319 continue
1319 if not l:
1320 if not l:
1320 # line deletions, new block is empty and we hit EOF
1321 # line deletions, new block is empty and we hit EOF
1321 lr.push(l)
1322 lr.push(l)
1322 break
1323 break
1323 s = l[2:]
1324 s = l[2:]
1324 if l.startswith('+ ') or l.startswith('! '):
1325 if l.startswith('+ ') or l.startswith('! '):
1325 u = '+' + s
1326 u = '+' + s
1326 elif l.startswith(' '):
1327 elif l.startswith(' '):
1327 u = ' ' + s
1328 u = ' ' + s
1328 elif len(self.b) == 0:
1329 elif len(self.b) == 0:
1329 # line deletions, new block is empty
1330 # line deletions, new block is empty
1330 lr.push(l)
1331 lr.push(l)
1331 break
1332 break
1332 else:
1333 else:
1333 raise PatchError(_("bad hunk #%d old text line %d") %
1334 raise PatchError(_("bad hunk #%d old text line %d") %
1334 (self.number, x))
1335 (self.number, x))
1335 self.b.append(s)
1336 self.b.append(s)
1336 while True:
1337 while True:
1337 if hunki >= len(self.hunk):
1338 if hunki >= len(self.hunk):
1338 h = ""
1339 h = ""
1339 else:
1340 else:
1340 h = self.hunk[hunki]
1341 h = self.hunk[hunki]
1341 hunki += 1
1342 hunki += 1
1342 if h == u:
1343 if h == u:
1343 break
1344 break
1344 elif h.startswith('-'):
1345 elif h.startswith('-'):
1345 continue
1346 continue
1346 else:
1347 else:
1347 self.hunk.insert(hunki - 1, u)
1348 self.hunk.insert(hunki - 1, u)
1348 break
1349 break
1349
1350
1350 if not self.a:
1351 if not self.a:
1351 # this happens when lines were only added to the hunk
1352 # this happens when lines were only added to the hunk
1352 for x in self.hunk:
1353 for x in self.hunk:
1353 if x.startswith('-') or x.startswith(' '):
1354 if x.startswith('-') or x.startswith(' '):
1354 self.a.append(x)
1355 self.a.append(x)
1355 if not self.b:
1356 if not self.b:
1356 # this happens when lines were only deleted from the hunk
1357 # this happens when lines were only deleted from the hunk
1357 for x in self.hunk:
1358 for x in self.hunk:
1358 if x.startswith('+') or x.startswith(' '):
1359 if x.startswith('+') or x.startswith(' '):
1359 self.b.append(x[1:])
1360 self.b.append(x[1:])
1360 # @@ -start,len +start,len @@
1361 # @@ -start,len +start,len @@
1361 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1362 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1362 self.startb, self.lenb)
1363 self.startb, self.lenb)
1363 self.hunk[0] = self.desc
1364 self.hunk[0] = self.desc
1364 self._fixnewline(lr)
1365 self._fixnewline(lr)
1365
1366
1366 def _fixnewline(self, lr):
1367 def _fixnewline(self, lr):
1367 l = lr.readline()
1368 l = lr.readline()
1368 if l.startswith('\ '):
1369 if l.startswith('\ '):
1369 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1370 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1370 else:
1371 else:
1371 lr.push(l)
1372 lr.push(l)
1372
1373
1373 def complete(self):
1374 def complete(self):
1374 return len(self.a) == self.lena and len(self.b) == self.lenb
1375 return len(self.a) == self.lena and len(self.b) == self.lenb
1375
1376
1376 def _fuzzit(self, old, new, fuzz, toponly):
1377 def _fuzzit(self, old, new, fuzz, toponly):
1377 # this removes context lines from the top and bottom of list 'l'. It
1378 # this removes context lines from the top and bottom of list 'l'. It
1378 # checks the hunk to make sure only context lines are removed, and then
1379 # checks the hunk to make sure only context lines are removed, and then
1379 # returns a new shortened list of lines.
1380 # returns a new shortened list of lines.
1380 fuzz = min(fuzz, len(old))
1381 fuzz = min(fuzz, len(old))
1381 if fuzz:
1382 if fuzz:
1382 top = 0
1383 top = 0
1383 bot = 0
1384 bot = 0
1384 hlen = len(self.hunk)
1385 hlen = len(self.hunk)
1385 for x in xrange(hlen - 1):
1386 for x in xrange(hlen - 1):
1386 # the hunk starts with the @@ line, so use x+1
1387 # the hunk starts with the @@ line, so use x+1
1387 if self.hunk[x + 1][0] == ' ':
1388 if self.hunk[x + 1][0] == ' ':
1388 top += 1
1389 top += 1
1389 else:
1390 else:
1390 break
1391 break
1391 if not toponly:
1392 if not toponly:
1392 for x in xrange(hlen - 1):
1393 for x in xrange(hlen - 1):
1393 if self.hunk[hlen - bot - 1][0] == ' ':
1394 if self.hunk[hlen - bot - 1][0] == ' ':
1394 bot += 1
1395 bot += 1
1395 else:
1396 else:
1396 break
1397 break
1397
1398
1398 bot = min(fuzz, bot)
1399 bot = min(fuzz, bot)
1399 top = min(fuzz, top)
1400 top = min(fuzz, top)
1400 return old[top:len(old) - bot], new[top:len(new) - bot], top
1401 return old[top:len(old) - bot], new[top:len(new) - bot], top
1401 return old, new, 0
1402 return old, new, 0
1402
1403
1403 def fuzzit(self, fuzz, toponly):
1404 def fuzzit(self, fuzz, toponly):
1404 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1405 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1405 oldstart = self.starta + top
1406 oldstart = self.starta + top
1406 newstart = self.startb + top
1407 newstart = self.startb + top
1407 # zero length hunk ranges already have their start decremented
1408 # zero length hunk ranges already have their start decremented
1408 if self.lena and oldstart > 0:
1409 if self.lena and oldstart > 0:
1409 oldstart -= 1
1410 oldstart -= 1
1410 if self.lenb and newstart > 0:
1411 if self.lenb and newstart > 0:
1411 newstart -= 1
1412 newstart -= 1
1412 return old, oldstart, new, newstart
1413 return old, oldstart, new, newstart
1413
1414
class binhunk(object):
    'A binary patch file.'

    def __init__(self, lr, fname):
        self.text = None          # decoded payload, set by _read()
        self.delta = False        # True for 'delta' (vs 'literal') hunks
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        """True once the binary payload has been decoded."""
        return self.text is not None

    def new(self, lines):
        """Return the new file content as a single-element list."""
        if not self.delta:
            return [self.text]
        return [applybindelta(self.text, ''.join(lines))]

    def _read(self, lr):
        def getline(lr, hunk):
            # read one raw line, record it in the hunk, return it stripped
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        # locate the 'literal <size>' or 'delta <size>' header line
        size = 0
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        # each data line is a length character (A-Z: 1-26, a-z: 27-52)
        # followed by base85 data; decode until the terminating short line
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            lenchar = line[0]
            if 'A' <= lenchar <= 'Z':
                chunklen = ord(lenchar) - ord('A') + 1
            else:
                chunklen = ord(lenchar) - ord('a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:chunklen])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1469
1470
def parsefilename(str):
    """Extract the filename from a '--- filename\\tstuff' header line.

    The first four characters ('--- ' or '+++ ') are dropped; the name
    runs up to the first tab, else the first space, else end of line.
    """
    # --- filename \t|space stuff
    s = str[4:].rstrip('\r\n')
    for sep in ('\t', ' '):
        i = s.find(sep)
        if i >= 0:
            return s[:i]
    return s
1479
1480
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...      c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -2,6 +1,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -6,3 +5,2 @@
     5
     d
    -lastline

    '''

    # headers have no reversehunk() and pass through unchanged
    return [c.reversehunk() if util.safehasattr(c, 'reversehunk') else c
            for c in hunks]
1542
1543
def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ...  1
    ...  2
    ... -3
    ...  4
    ...  5
    ...  6
    ... +6.1
    ... +6.2
    ...  7
    ...  8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
     2
    -3
     4
    @@ -6,2 +5,4 @@
     6
    +6.1
    +6.2
     7
    @@ -8,1 +9,2 @@
     8
    +9
    """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            self.fromline = 0   # current line number on the old side
            self.toline = 0     # current line number on the new side
            self.proc = ''      # function/section name from the @@ line
            self.header = None  # header object for the current file
            self.context = []   # context lines pending after a hunk
            self.before = []    # context lines preceding the next hunk
            self.hunk = []      # +/- lines of the hunk being accumulated
            self.headers = []   # completed header objects, in order

        def addrange(self, limits):
            # record start lines from an @@ range; lengths are recomputed
            # later by recordhunk, so fromend/toend are unused here
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # flush the pending hunk (if any) into the current header,
            # then remember *context* as the lead-in for the next hunk
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                        self.proc, self.before, self.hunk, context, maxcontext)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # context seen since the last hunk becomes this hunk's lead-in
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            # close out any pending hunk before switching files
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            # flush the trailing hunk and return all parsed headers
            self.addcontext([])
            return self.headers

        # legal event transitions: maps current state -> event -> handler;
        # anything missing here is a malformed patch
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
            }

    p = parser()
    # feed the concatenated chunks to scanpatch through a file-like object
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    for newstate, data in scanpatch(fp):
        try:
            # handlers are unbound functions in the dict, hence p as arg
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1666
1667
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b' a/b/c ', 0, b'')
    ('', ' a/b/c')
    >>> pathtransform(b' a/b/c ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b' a//b/c ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        return '', prefix + path.rstrip()
    pathlen = len(path)
    pos = 0
    remaining = strip
    while remaining > 0:
        pos = path.find('/', pos)
        if pos == -1:
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (remaining, strip, path))
        pos += 1
        # consume '//' in the path
        while pos < pathlen - 1 and path[pos:pos + 1] == '/':
            pos += 1
        remaining -= 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1704
1705
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    # Derive a patchmeta (target file name plus ADD/DELETE op, if any)
    # for a plain (non-git) diff hunk, deciding which of the two header
    # paths names the file actually present in *backend*.
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a 0,0 range on the null side marks file creation/removal
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back on the header paths when neither file exists yet
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1759
1760
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, ''):
            if not p(line):
                # stop at the first non-matching line and put it back
                lr.push(line)
                break
            lines.append(line)
        return lines

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            # the ---/+++ pair, when present, belongs to the header too
            fromfile = lr.readline()
            if not fromfile.startswith('---'):
                lr.push(fromfile)
            else:
                tofile = lr.readline()
                header += [fromfile, tofile]
            yield 'file', header
        elif line[0:1] == ' ':
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1805
1806
def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        # seekable input: remember the position so we can rewind after
        # the metadata scan
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable input (e.g. a pipe): buffer the remainder in memory
        fp = stringio(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the caller re-reads the patch body from where it left off
    fp.seek(pos)
    return gitpatches
1831
1832
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    # populated lazily on the first 'diff --git' line; None means we have
    # not determined yet whether this is a git-style patch
    gitpatches = None

    # our states
    BFILE = 1
    # None: diff style unknown; True: context diff; False: unified diff
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            # a hunk start for the current file: unified '@@', context
            # '***...', or a git binary patch header
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of a file: announce the file itself first
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # emit hunkless file events for metadata-only entries that
            # precede the current file in the scanned git patch list
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit any remaining metadata-only git entries at end of input
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1927
1928
def applybindelta(binchunk, data):
    """Apply a binary delta hunk to ``data`` and return the result.

    ``binchunk`` is a git-style binary delta: two varint headers (source
    and target lengths, skipped here) followed by a stream of copy and
    insert opcodes. The algorithm used is the algorithm from git's
    patch-delta.c.
    """
    def deltahead(binchunk):
        # Length of one varint header: bytes with the high bit set are
        # continuation bytes; the first byte without it ends the varint.
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    out = ""
    # Skip the two size headers (source size, then target size).
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            # Copy opcode: low bits select which offset/size bytes follow.
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i]) << 16
                i += 1
            if size == 0:
                # Per the git delta format, a zero size means 0x10000.
                size = 0x10000
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            # Insert opcode: copy the next ``cmd`` literal bytes from the
            # delta stream itself.
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            # Opcode 0 is reserved and must never appear.
            raise PatchError(_('unexpected delta opcode 0'))
    return out
1983
1984
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # Delegate to _applydiff with the default per-file patcher class.
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)
1996
1997
1997 def _canonprefix(repo, prefix):
1998 def _canonprefix(repo, prefix):
1998 if prefix:
1999 if prefix:
1999 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2000 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2000 if prefix != '':
2001 if prefix != '':
2001 prefix += '/'
2002 prefix += '/'
2002 return prefix
2003 return prefix
2003
2004
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Core patch application loop.

    Consumes (state, values) events from iterhunks(fp) and applies them
    through ``backend``/``store``. Returns 0 on success, 1 on fuzz, -1 if
    any rejects were recorded.
    """
    prefix = _canonprefix(backend.repo, prefix)
    def pstrip(p):
        # Strip path components and apply the canonical prefix.
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # Hunks for a file we failed to open are dropped.
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # Pre-load copy/rename sources into the store before any
            # hunks mutate them.
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2088
2089
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        # Scrape GNU patch's progress output for touched files, fuzz
        # and failure notices.
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
2130
2131
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply ``patchobj`` (a path or a file-like object) via ``backend``.

    Touched files are added to ``files``. Raises PatchError when the
    patch fails to apply; returns True if fuzz was encountered.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        # patchobj may be a filename or an already-open file object.
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2157
2158
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    backend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2164
2165
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply ``patchobj`` on top of ``ctx`` into ``store`` (in-memory)."""
    backend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2169
2170
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = set()
    if patcher:
        # A configured external patch program takes precedence.
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
2191
2192
def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
    """Return the set of file paths touched by the patch at ``patchpath``.

    Rename sources are included. Does not apply the patch.
    """
    backend = fsbackend(ui, repo.root)
    prefix = _canonprefix(repo, prefix)
    with open(patchpath, 'rb') as fp:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1,
                                                   prefix)[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       prefix)
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise error.Abort(_('unsupported parser state: %s') % state)
        return changed
2214
2215
class GitDiffRequired(Exception):
    """Raised when a change can only be represented in git patch format."""
    pass
2217
2218
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
                           git=True, whitespace=True, formatchanging=True)
2222
2223
# Backwards-compatible alias for the historical name of diffallopts.
diffopts = diffallopts
2224
2225
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }
    buildopts['worddiff'] = ui.configbool('experimental', 'worddiff')

    if git:
        buildopts['git'] = get('git')

        # since this is in the experimental section, we need to call
        # ui.configbool directory
        buildopts['showsimilarity'] = ui.configbool('experimental',
                                                    'extendedheader.similarity')

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # default value
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
        buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        binary = None if opts is None else opts.get('binary')
        buildopts['nobinary'] = (not binary if binary is not None
                                 else get('nobinary', forceplain=False))
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2306
2307
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None,
         hunksfilterfn=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.

    hunksfilterfn, if not None, should be a function taking a filectx and
    hunks generator that may yield filtered hunks.
    '''
    for fctx1, fctx2, hdr, hunks in diffhunks(
            repo, node1=node1, node2=node2,
            match=match, changes=changes, opts=opts,
            losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
    ):
        if hunksfilterfn is not None:
            # If the file has been removed, fctx2 is None; but this should
            # not occur here since we catch removed files early in
            # cmdutil.getloglinerangerevs() for 'hg log -L'.
            assert fctx2 is not None, \
                'fctx2 unexpectly None in diff hunks filtering'
            hunks = hunksfilterfn(fctx2, hunks)
        # Flatten the per-hunk line lists into one text blob per file.
        text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
        if hdr and (text or len(hdr) > 1):
            yield '\n'.join(hdr) + '\n'
        if text:
            yield text
2353
2354
2354 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2355 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2355 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2356 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2356 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2357 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2357 where `header` is a list of diff headers and `hunks` is an iterable of
2358 where `header` is a list of diff headers and `hunks` is an iterable of
2358 (`hunkrange`, `hunklines`) tuples.
2359 (`hunkrange`, `hunklines`) tuples.
2359
2360
2360 See diff() for the meaning of parameters.
2361 See diff() for the meaning of parameters.
2361 """
2362 """
2362
2363
2363 if opts is None:
2364 if opts is None:
2364 opts = mdiff.defaultopts
2365 opts = mdiff.defaultopts
2365
2366
2366 if not node1 and not node2:
2367 if not node1 and not node2:
2367 node1 = repo.dirstate.p1()
2368 node1 = repo.dirstate.p1()
2368
2369
2369 def lrugetfilectx():
2370 def lrugetfilectx():
2370 cache = {}
2371 cache = {}
2371 order = collections.deque()
2372 order = collections.deque()
2372 def getfilectx(f, ctx):
2373 def getfilectx(f, ctx):
2373 fctx = ctx.filectx(f, filelog=cache.get(f))
2374 fctx = ctx.filectx(f, filelog=cache.get(f))
2374 if f not in cache:
2375 if f not in cache:
2375 if len(cache) > 20:
2376 if len(cache) > 20:
2376 del cache[order.popleft()]
2377 del cache[order.popleft()]
2377 cache[f] = fctx.filelog()
2378 cache[f] = fctx.filelog()
2378 else:
2379 else:
2379 order.remove(f)
2380 order.remove(f)
2380 order.append(f)
2381 order.append(f)
2381 return fctx
2382 return fctx
2382 return getfilectx
2383 return getfilectx
2383 getfilectx = lrugetfilectx()
2384 getfilectx = lrugetfilectx()
2384
2385
2385 ctx1 = repo[node1]
2386 ctx1 = repo[node1]
2386 ctx2 = repo[node2]
2387 ctx2 = repo[node2]
2387
2388
2388 relfiltered = False
2389 relfiltered = False
2389 if relroot != '' and match.always():
2390 if relroot != '' and match.always():
2390 # as a special case, create a new matcher with just the relroot
2391 # as a special case, create a new matcher with just the relroot
2391 pats = [relroot]
2392 pats = [relroot]
2392 match = scmutil.match(ctx2, pats, default='path')
2393 match = scmutil.match(ctx2, pats, default='path')
2393 relfiltered = True
2394 relfiltered = True
2394
2395
2395 if not changes:
2396 if not changes:
2396 changes = repo.status(ctx1, ctx2, match=match)
2397 changes = repo.status(ctx1, ctx2, match=match)
2397 modified, added, removed = changes[:3]
2398 modified, added, removed = changes[:3]
2398
2399
2399 if not modified and not added and not removed:
2400 if not modified and not added and not removed:
2400 return []
2401 return []
2401
2402
2402 if repo.ui.debugflag:
2403 if repo.ui.debugflag:
2403 hexfunc = hex
2404 hexfunc = hex
2404 else:
2405 else:
2405 hexfunc = short
2406 hexfunc = short
2406 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2407 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2407
2408
2408 if copy is None:
2409 if copy is None:
2409 copy = {}
2410 copy = {}
2410 if opts.git or opts.upgrade:
2411 if opts.git or opts.upgrade:
2411 copy = copies.pathcopies(ctx1, ctx2, match=match)
2412 copy = copies.pathcopies(ctx1, ctx2, match=match)
2412
2413
2413 if relroot is not None:
2414 if relroot is not None:
2414 if not relfiltered:
2415 if not relfiltered:
2415 # XXX this would ideally be done in the matcher, but that is
2416 # XXX this would ideally be done in the matcher, but that is
2416 # generally meant to 'or' patterns, not 'and' them. In this case we
2417 # generally meant to 'or' patterns, not 'and' them. In this case we
2417 # need to 'and' all the patterns from the matcher with relroot.
2418 # need to 'and' all the patterns from the matcher with relroot.
2418 def filterrel(l):
2419 def filterrel(l):
2419 return [f for f in l if f.startswith(relroot)]
2420 return [f for f in l if f.startswith(relroot)]
2420 modified = filterrel(modified)
2421 modified = filterrel(modified)
2421 added = filterrel(added)
2422 added = filterrel(added)
2422 removed = filterrel(removed)
2423 removed = filterrel(removed)
2423 relfiltered = True
2424 relfiltered = True
2424 # filter out copies where either side isn't inside the relative root
2425 # filter out copies where either side isn't inside the relative root
2425 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2426 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2426 if dst.startswith(relroot)
2427 if dst.startswith(relroot)
2427 and src.startswith(relroot)))
2428 and src.startswith(relroot)))
2428
2429
2429 modifiedset = set(modified)
2430 modifiedset = set(modified)
2430 addedset = set(added)
2431 addedset = set(added)
2431 removedset = set(removed)
2432 removedset = set(removed)
2432 for f in modified:
2433 for f in modified:
2433 if f not in ctx1:
2434 if f not in ctx1:
2434 # Fix up added, since merged-in additions appear as
2435 # Fix up added, since merged-in additions appear as
2435 # modifications during merges
2436 # modifications during merges
2436 modifiedset.remove(f)
2437 modifiedset.remove(f)
2437 addedset.add(f)
2438 addedset.add(f)
2438 for f in removed:
2439 for f in removed:
2439 if f not in ctx1:
2440 if f not in ctx1:
2440 # Merged-in additions that are then removed are reported as removed.
2441 # Merged-in additions that are then removed are reported as removed.
2441 # They are not in ctx1, so We don't want to show them in the diff.
2442 # They are not in ctx1, so We don't want to show them in the diff.
2442 removedset.remove(f)
2443 removedset.remove(f)
2443 modified = sorted(modifiedset)
2444 modified = sorted(modifiedset)
2444 added = sorted(addedset)
2445 added = sorted(addedset)
2445 removed = sorted(removedset)
2446 removed = sorted(removedset)
2446 for dst, src in list(copy.items()):
2447 for dst, src in list(copy.items()):
2447 if src not in ctx1:
2448 if src not in ctx1:
2448 # Files merged in during a merge and then copied/renamed are
2449 # Files merged in during a merge and then copied/renamed are
2449 # reported as copies. We want to show them in the diff as additions.
2450 # reported as copies. We want to show them in the diff as additions.
2450 del copy[dst]
2451 del copy[dst]
2451
2452
2452 def difffn(opts, losedata):
2453 def difffn(opts, losedata):
2453 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2454 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2454 copy, getfilectx, opts, losedata, prefix, relroot)
2455 copy, getfilectx, opts, losedata, prefix, relroot)
2455 if opts.upgrade and not opts.git:
2456 if opts.upgrade and not opts.git:
2456 try:
2457 try:
2457 def losedata(fn):
2458 def losedata(fn):
2458 if not losedatafn or not losedatafn(fn=fn):
2459 if not losedatafn or not losedatafn(fn=fn):
2459 raise GitDiffRequired
2460 raise GitDiffRequired
2460 # Buffer the whole output until we are sure it can be generated
2461 # Buffer the whole output until we are sure it can be generated
2461 return list(difffn(opts.copy(git=False), losedata))
2462 return list(difffn(opts.copy(git=False), losedata))
2462 except GitDiffRequired:
2463 except GitDiffRequired:
2463 return difffn(opts.copy(git=True), None)
2464 return difffn(opts.copy(git=True), None)
2464 else:
2465 else:
2465 return difffn(opts, None)
2466 return difffn(opts, None)
2466
2467
2467 def difflabel(func, *args, **kw):
2468 def difflabel(func, *args, **kw):
2468 '''yields 2-tuples of (output, label) based on the output of func()'''
2469 '''yields 2-tuples of (output, label) based on the output of func()'''
2469 inlinecolor = False
2470 inlinecolor = False
2470 if kw.get(r'opts'):
2471 if kw.get(r'opts'):
2471 inlinecolor = kw[r'opts'].worddiff
2472 inlinecolor = kw[r'opts'].worddiff
2472 headprefixes = [('diff', 'diff.diffline'),
2473 headprefixes = [('diff', 'diff.diffline'),
2473 ('copy', 'diff.extended'),
2474 ('copy', 'diff.extended'),
2474 ('rename', 'diff.extended'),
2475 ('rename', 'diff.extended'),
2475 ('old', 'diff.extended'),
2476 ('old', 'diff.extended'),
2476 ('new', 'diff.extended'),
2477 ('new', 'diff.extended'),
2477 ('deleted', 'diff.extended'),
2478 ('deleted', 'diff.extended'),
2478 ('index', 'diff.extended'),
2479 ('index', 'diff.extended'),
2479 ('similarity', 'diff.extended'),
2480 ('similarity', 'diff.extended'),
2480 ('---', 'diff.file_a'),
2481 ('---', 'diff.file_a'),
2481 ('+++', 'diff.file_b')]
2482 ('+++', 'diff.file_b')]
2482 textprefixes = [('@', 'diff.hunk'),
2483 textprefixes = [('@', 'diff.hunk'),
2483 ('-', 'diff.deleted'),
2484 ('-', 'diff.deleted'),
2484 ('+', 'diff.inserted')]
2485 ('+', 'diff.inserted')]
2485 head = False
2486 head = False
2486 for chunk in func(*args, **kw):
2487 for chunk in func(*args, **kw):
2487 lines = chunk.split('\n')
2488 lines = chunk.split('\n')
2488 matches = {}
2489 matches = {}
2489 if inlinecolor:
2490 if inlinecolor:
2490 matches = _findmatches(lines)
2491 matches = _findmatches(lines)
2491 for i, line in enumerate(lines):
2492 for i, line in enumerate(lines):
2492 if i != 0:
2493 if i != 0:
2493 yield ('\n', '')
2494 yield ('\n', '')
2494 if head:
2495 if head:
2495 if line.startswith('@'):
2496 if line.startswith('@'):
2496 head = False
2497 head = False
2497 else:
2498 else:
2498 if line and line[0] not in ' +-@\\':
2499 if line and line[0] not in ' +-@\\':
2499 head = True
2500 head = True
2500 stripline = line
2501 stripline = line
2501 diffline = False
2502 diffline = False
2502 if not head and line and line[0] in '+-':
2503 if not head and line and line[0] in '+-':
2503 # highlight tabs and trailing whitespace, but only in
2504 # highlight tabs and trailing whitespace, but only in
2504 # changed lines
2505 # changed lines
2505 stripline = line.rstrip()
2506 stripline = line.rstrip()
2506 diffline = True
2507 diffline = True
2507
2508
2508 prefixes = textprefixes
2509 prefixes = textprefixes
2509 if head:
2510 if head:
2510 prefixes = headprefixes
2511 prefixes = headprefixes
2511 for prefix, label in prefixes:
2512 for prefix, label in prefixes:
2512 if stripline.startswith(prefix):
2513 if stripline.startswith(prefix):
2513 if diffline:
2514 if diffline:
2514 if i in matches:
2515 if i in matches:
2515 for t, l in _inlinediff(lines[i].rstrip(),
2516 for t, l in _inlinediff(lines[i].rstrip(),
2516 lines[matches[i]].rstrip(),
2517 lines[matches[i]].rstrip(),
2517 label):
2518 label):
2518 yield (t, l)
2519 yield (t, l)
2519 else:
2520 else:
2520 for token in tabsplitter.findall(stripline):
2521 for token in tabsplitter.findall(stripline):
2521 if '\t' == token[0]:
2522 if '\t' == token[0]:
2522 yield (token, 'diff.tab')
2523 yield (token, 'diff.tab')
2523 else:
2524 else:
2524 yield (token, label)
2525 yield (token, label)
2525 else:
2526 else:
2526 yield (stripline, label)
2527 yield (stripline, label)
2527 break
2528 break
2528 else:
2529 else:
2529 yield (line, '')
2530 yield (line, '')
2530 if line != stripline:
2531 if line != stripline:
2531 yield (line[len(stripline):], 'diff.trailingwhitespace')
2532 yield (line[len(stripline):], 'diff.trailingwhitespace')
2532
2533
2533 def _findmatches(slist):
2534 def _findmatches(slist):
2534 '''Look for insertion matches to deletion and returns a dict of
2535 '''Look for insertion matches to deletion and returns a dict of
2535 correspondences.
2536 correspondences.
2536 '''
2537 '''
2537 lastmatch = 0
2538 lastmatch = 0
2538 matches = {}
2539 matches = {}
2539 for i, line in enumerate(slist):
2540 for i, line in enumerate(slist):
2540 if line == '':
2541 if line == '':
2541 continue
2542 continue
2542 if line[0] == '-':
2543 if line[0] == '-':
2543 lastmatch = max(lastmatch, i)
2544 lastmatch = max(lastmatch, i)
2544 newgroup = False
2545 newgroup = False
2545 for j, newline in enumerate(slist[lastmatch + 1:]):
2546 for j, newline in enumerate(slist[lastmatch + 1:]):
2546 if newline == '':
2547 if newline == '':
2547 continue
2548 continue
2548 if newline[0] == '-' and newgroup: # too far, no match
2549 if newline[0] == '-' and newgroup: # too far, no match
2549 break
2550 break
2550 if newline[0] == '+': # potential match
2551 if newline[0] == '+': # potential match
2551 newgroup = True
2552 newgroup = True
2552 sim = difflib.SequenceMatcher(None, line, newline).ratio()
2553 sim = difflib.SequenceMatcher(None, line, newline).ratio()
2553 if sim > 0.7:
2554 if sim > 0.7:
2554 lastmatch = lastmatch + 1 + j
2555 lastmatch = lastmatch + 1 + j
2555 matches[i] = lastmatch
2556 matches[i] = lastmatch
2556 matches[lastmatch] = i
2557 matches[lastmatch] = i
2557 break
2558 break
2558 return matches
2559 return matches
2559
2560
2560 def _inlinediff(s1, s2, operation):
2561 def _inlinediff(s1, s2, operation):
2561 '''Perform string diff to highlight specific changes.'''
2562 '''Perform string diff to highlight specific changes.'''
2562 operation_skip = '+?' if operation == 'diff.deleted' else '-?'
2563 operation_skip = '+?' if operation == 'diff.deleted' else '-?'
2563 if operation == 'diff.deleted':
2564 if operation == 'diff.deleted':
2564 s2, s1 = s1, s2
2565 s2, s1 = s1, s2
2565
2566
2566 buff = []
2567 buff = []
2567 # we never want to higlight the leading +-
2568 # we never want to higlight the leading +-
2568 if operation == 'diff.deleted' and s2.startswith('-'):
2569 if operation == 'diff.deleted' and s2.startswith('-'):
2569 label = operation
2570 label = operation
2570 token = '-'
2571 token = '-'
2571 s2 = s2[1:]
2572 s2 = s2[1:]
2572 s1 = s1[1:]
2573 s1 = s1[1:]
2573 elif operation == 'diff.inserted' and s1.startswith('+'):
2574 elif operation == 'diff.inserted' and s1.startswith('+'):
2574 label = operation
2575 label = operation
2575 token = '+'
2576 token = '+'
2576 s2 = s2[1:]
2577 s2 = s2[1:]
2577 s1 = s1[1:]
2578 s1 = s1[1:]
2578 else:
2579 else:
2579 raise error.ProgrammingError("Case not expected, operation = %s" %
2580 raise error.ProgrammingError("Case not expected, operation = %s" %
2580 operation)
2581 operation)
2581
2582
2582 s = difflib.ndiff(_nonwordre.split(s2), _nonwordre.split(s1))
2583 s = difflib.ndiff(_nonwordre.split(s2), _nonwordre.split(s1))
2583 for part in s:
2584 for part in s:
2584 if part[0] in operation_skip or len(part) == 2:
2585 if part[0] in operation_skip or len(part) == 2:
2585 continue
2586 continue
2586 l = operation + '.highlight'
2587 l = operation + '.highlight'
2587 if part[0] in ' ':
2588 if part[0] in ' ':
2588 l = operation
2589 l = operation
2589 if part[2:] == '\t':
2590 if part[2:] == '\t':
2590 l = 'diff.tab'
2591 l = 'diff.tab'
2591 if l == label: # contiguous token with same label
2592 if l == label: # contiguous token with same label
2592 token += part[2:]
2593 token += part[2:]
2593 continue
2594 continue
2594 else:
2595 else:
2595 buff.append((token, label))
2596 buff.append((token, label))
2596 label = l
2597 label = l
2597 token = part[2:]
2598 token = part[2:]
2598 buff.append((token, label))
2599 buff.append((token, label))
2599
2600
2600 return buff
2601 return buff
2601
2602
2602 def diffui(*args, **kw):
2603 def diffui(*args, **kw):
2603 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2604 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2604 return difflabel(diff, *args, **kw)
2605 return difflabel(diff, *args, **kw)
2605
2606
2606 def _filepairs(modified, added, removed, copy, opts):
2607 def _filepairs(modified, added, removed, copy, opts):
2607 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2608 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2608 before and f2 is the the name after. For added files, f1 will be None,
2609 before and f2 is the the name after. For added files, f1 will be None,
2609 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2610 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2610 or 'rename' (the latter two only if opts.git is set).'''
2611 or 'rename' (the latter two only if opts.git is set).'''
2611 gone = set()
2612 gone = set()
2612
2613
2613 copyto = dict([(v, k) for k, v in copy.items()])
2614 copyto = dict([(v, k) for k, v in copy.items()])
2614
2615
2615 addedset, removedset = set(added), set(removed)
2616 addedset, removedset = set(added), set(removed)
2616
2617
2617 for f in sorted(modified + added + removed):
2618 for f in sorted(modified + added + removed):
2618 copyop = None
2619 copyop = None
2619 f1, f2 = f, f
2620 f1, f2 = f, f
2620 if f in addedset:
2621 if f in addedset:
2621 f1 = None
2622 f1 = None
2622 if f in copy:
2623 if f in copy:
2623 if opts.git:
2624 if opts.git:
2624 f1 = copy[f]
2625 f1 = copy[f]
2625 if f1 in removedset and f1 not in gone:
2626 if f1 in removedset and f1 not in gone:
2626 copyop = 'rename'
2627 copyop = 'rename'
2627 gone.add(f1)
2628 gone.add(f1)
2628 else:
2629 else:
2629 copyop = 'copy'
2630 copyop = 'copy'
2630 elif f in removedset:
2631 elif f in removedset:
2631 f2 = None
2632 f2 = None
2632 if opts.git:
2633 if opts.git:
2633 # have we already reported a copy above?
2634 # have we already reported a copy above?
2634 if (f in copyto and copyto[f] in addedset
2635 if (f in copyto and copyto[f] in addedset
2635 and copy[copyto[f]] == f):
2636 and copy[copyto[f]] == f):
2636 continue
2637 continue
2637 yield f1, f2, copyop
2638 yield f1, f2, copyop
2638
2639
2639 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2640 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2640 copy, getfilectx, opts, losedatafn, prefix, relroot):
2641 copy, getfilectx, opts, losedatafn, prefix, relroot):
2641 '''given input data, generate a diff and yield it in blocks
2642 '''given input data, generate a diff and yield it in blocks
2642
2643
2643 If generating a diff would lose data like flags or binary data and
2644 If generating a diff would lose data like flags or binary data and
2644 losedatafn is not None, it will be called.
2645 losedatafn is not None, it will be called.
2645
2646
2646 relroot is removed and prefix is added to every path in the diff output.
2647 relroot is removed and prefix is added to every path in the diff output.
2647
2648
2648 If relroot is not empty, this function expects every path in modified,
2649 If relroot is not empty, this function expects every path in modified,
2649 added, removed and copy to start with it.'''
2650 added, removed and copy to start with it.'''
2650
2651
2651 def gitindex(text):
2652 def gitindex(text):
2652 if not text:
2653 if not text:
2653 text = ""
2654 text = ""
2654 l = len(text)
2655 l = len(text)
2655 s = hashlib.sha1('blob %d\0' % l)
2656 s = hashlib.sha1('blob %d\0' % l)
2656 s.update(text)
2657 s.update(text)
2657 return hex(s.digest())
2658 return hex(s.digest())
2658
2659
2659 if opts.noprefix:
2660 if opts.noprefix:
2660 aprefix = bprefix = ''
2661 aprefix = bprefix = ''
2661 else:
2662 else:
2662 aprefix = 'a/'
2663 aprefix = 'a/'
2663 bprefix = 'b/'
2664 bprefix = 'b/'
2664
2665
2665 def diffline(f, revs):
2666 def diffline(f, revs):
2666 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2667 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2667 return 'diff %s %s' % (revinfo, f)
2668 return 'diff %s %s' % (revinfo, f)
2668
2669
2669 def isempty(fctx):
2670 def isempty(fctx):
2670 return fctx is None or fctx.size() == 0
2671 return fctx is None or fctx.size() == 0
2671
2672
2672 date1 = util.datestr(ctx1.date())
2673 date1 = util.datestr(ctx1.date())
2673 date2 = util.datestr(ctx2.date())
2674 date2 = util.datestr(ctx2.date())
2674
2675
2675 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2676 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2676
2677
2677 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2678 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2678 or repo.ui.configbool('devel', 'check-relroot')):
2679 or repo.ui.configbool('devel', 'check-relroot')):
2679 for f in modified + added + removed + list(copy) + list(copy.values()):
2680 for f in modified + added + removed + list(copy) + list(copy.values()):
2680 if f is not None and not f.startswith(relroot):
2681 if f is not None and not f.startswith(relroot):
2681 raise AssertionError(
2682 raise AssertionError(
2682 "file %s doesn't start with relroot %s" % (f, relroot))
2683 "file %s doesn't start with relroot %s" % (f, relroot))
2683
2684
2684 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2685 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2685 content1 = None
2686 content1 = None
2686 content2 = None
2687 content2 = None
2687 fctx1 = None
2688 fctx1 = None
2688 fctx2 = None
2689 fctx2 = None
2689 flag1 = None
2690 flag1 = None
2690 flag2 = None
2691 flag2 = None
2691 if f1:
2692 if f1:
2692 fctx1 = getfilectx(f1, ctx1)
2693 fctx1 = getfilectx(f1, ctx1)
2693 if opts.git or losedatafn:
2694 if opts.git or losedatafn:
2694 flag1 = ctx1.flags(f1)
2695 flag1 = ctx1.flags(f1)
2695 if f2:
2696 if f2:
2696 fctx2 = getfilectx(f2, ctx2)
2697 fctx2 = getfilectx(f2, ctx2)
2697 if opts.git or losedatafn:
2698 if opts.git or losedatafn:
2698 flag2 = ctx2.flags(f2)
2699 flag2 = ctx2.flags(f2)
2699 # if binary is True, output "summary" or "base85", but not "text diff"
2700 # if binary is True, output "summary" or "base85", but not "text diff"
2700 binary = not opts.text and any(f.isbinary()
2701 binary = not opts.text and any(f.isbinary()
2701 for f in [fctx1, fctx2] if f is not None)
2702 for f in [fctx1, fctx2] if f is not None)
2702
2703
2703 if losedatafn and not opts.git:
2704 if losedatafn and not opts.git:
2704 if (binary or
2705 if (binary or
2705 # copy/rename
2706 # copy/rename
2706 f2 in copy or
2707 f2 in copy or
2707 # empty file creation
2708 # empty file creation
2708 (not f1 and isempty(fctx2)) or
2709 (not f1 and isempty(fctx2)) or
2709 # empty file deletion
2710 # empty file deletion
2710 (isempty(fctx1) and not f2) or
2711 (isempty(fctx1) and not f2) or
2711 # create with flags
2712 # create with flags
2712 (not f1 and flag2) or
2713 (not f1 and flag2) or
2713 # change flags
2714 # change flags
2714 (f1 and f2 and flag1 != flag2)):
2715 (f1 and f2 and flag1 != flag2)):
2715 losedatafn(f2 or f1)
2716 losedatafn(f2 or f1)
2716
2717
2717 path1 = f1 or f2
2718 path1 = f1 or f2
2718 path2 = f2 or f1
2719 path2 = f2 or f1
2719 path1 = posixpath.join(prefix, path1[len(relroot):])
2720 path1 = posixpath.join(prefix, path1[len(relroot):])
2720 path2 = posixpath.join(prefix, path2[len(relroot):])
2721 path2 = posixpath.join(prefix, path2[len(relroot):])
2721 header = []
2722 header = []
2722 if opts.git:
2723 if opts.git:
2723 header.append('diff --git %s%s %s%s' %
2724 header.append('diff --git %s%s %s%s' %
2724 (aprefix, path1, bprefix, path2))
2725 (aprefix, path1, bprefix, path2))
2725 if not f1: # added
2726 if not f1: # added
2726 header.append('new file mode %s' % gitmode[flag2])
2727 header.append('new file mode %s' % gitmode[flag2])
2727 elif not f2: # removed
2728 elif not f2: # removed
2728 header.append('deleted file mode %s' % gitmode[flag1])
2729 header.append('deleted file mode %s' % gitmode[flag1])
2729 else: # modified/copied/renamed
2730 else: # modified/copied/renamed
2730 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2731 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2731 if mode1 != mode2:
2732 if mode1 != mode2:
2732 header.append('old mode %s' % mode1)
2733 header.append('old mode %s' % mode1)
2733 header.append('new mode %s' % mode2)
2734 header.append('new mode %s' % mode2)
2734 if copyop is not None:
2735 if copyop is not None:
2735 if opts.showsimilarity:
2736 if opts.showsimilarity:
2736 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2737 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2737 header.append('similarity index %d%%' % sim)
2738 header.append('similarity index %d%%' % sim)
2738 header.append('%s from %s' % (copyop, path1))
2739 header.append('%s from %s' % (copyop, path1))
2739 header.append('%s to %s' % (copyop, path2))
2740 header.append('%s to %s' % (copyop, path2))
2740 elif revs and not repo.ui.quiet:
2741 elif revs and not repo.ui.quiet:
2741 header.append(diffline(path1, revs))
2742 header.append(diffline(path1, revs))
2742
2743
2743 # fctx.is | diffopts | what to | is fctx.data()
2744 # fctx.is | diffopts | what to | is fctx.data()
2744 # binary() | text nobinary git index | output? | outputted?
2745 # binary() | text nobinary git index | output? | outputted?
2745 # ------------------------------------|----------------------------
2746 # ------------------------------------|----------------------------
2746 # yes | no no no * | summary | no
2747 # yes | no no no * | summary | no
2747 # yes | no no yes * | base85 | yes
2748 # yes | no no yes * | base85 | yes
2748 # yes | no yes no * | summary | no
2749 # yes | no yes no * | summary | no
2749 # yes | no yes yes 0 | summary | no
2750 # yes | no yes yes 0 | summary | no
2750 # yes | no yes yes >0 | summary | semi [1]
2751 # yes | no yes yes >0 | summary | semi [1]
2751 # yes | yes * * * | text diff | yes
2752 # yes | yes * * * | text diff | yes
2752 # no | * * * * | text diff | yes
2753 # no | * * * * | text diff | yes
2753 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
2754 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
2754 if binary and (not opts.git or (opts.git and opts.nobinary and not
2755 if binary and (not opts.git or (opts.git and opts.nobinary and not
2755 opts.index)):
2756 opts.index)):
2756 # fast path: no binary content will be displayed, content1 and
2757 # fast path: no binary content will be displayed, content1 and
2757 # content2 are only used for equivalent test. cmp() could have a
2758 # content2 are only used for equivalent test. cmp() could have a
2758 # fast path.
2759 # fast path.
2759 if fctx1 is not None:
2760 if fctx1 is not None:
2760 content1 = b'\0'
2761 content1 = b'\0'
2761 if fctx2 is not None:
2762 if fctx2 is not None:
2762 if fctx1 is not None and not fctx1.cmp(fctx2):
2763 if fctx1 is not None and not fctx1.cmp(fctx2):
2763 content2 = b'\0' # not different
2764 content2 = b'\0' # not different
2764 else:
2765 else:
2765 content2 = b'\0\0'
2766 content2 = b'\0\0'
2766 else:
2767 else:
2767 # normal path: load contents
2768 # normal path: load contents
2768 if fctx1 is not None:
2769 if fctx1 is not None:
2769 content1 = fctx1.data()
2770 content1 = fctx1.data()
2770 if fctx2 is not None:
2771 if fctx2 is not None:
2771 content2 = fctx2.data()
2772 content2 = fctx2.data()
2772
2773
2773 if binary and opts.git and not opts.nobinary:
2774 if binary and opts.git and not opts.nobinary:
2774 text = mdiff.b85diff(content1, content2)
2775 text = mdiff.b85diff(content1, content2)
2775 if text:
2776 if text:
2776 header.append('index %s..%s' %
2777 header.append('index %s..%s' %
2777 (gitindex(content1), gitindex(content2)))
2778 (gitindex(content1), gitindex(content2)))
2778 hunks = (None, [text]),
2779 hunks = (None, [text]),
2779 else:
2780 else:
2780 if opts.git and opts.index > 0:
2781 if opts.git and opts.index > 0:
2781 flag = flag1
2782 flag = flag1
2782 if flag is None:
2783 if flag is None:
2783 flag = flag2
2784 flag = flag2
2784 header.append('index %s..%s %s' %
2785 header.append('index %s..%s %s' %
2785 (gitindex(content1)[0:opts.index],
2786 (gitindex(content1)[0:opts.index],
2786 gitindex(content2)[0:opts.index],
2787 gitindex(content2)[0:opts.index],
2787 gitmode[flag]))
2788 gitmode[flag]))
2788
2789
2789 uheaders, hunks = mdiff.unidiff(content1, date1,
2790 uheaders, hunks = mdiff.unidiff(content1, date1,
2790 content2, date2,
2791 content2, date2,
2791 path1, path2, opts=opts)
2792 path1, path2, opts=opts)
2792 header.extend(uheaders)
2793 header.extend(uheaders)
2793 yield fctx1, fctx2, header, hunks
2794 yield fctx1, fctx2, header, hunks
2794
2795
2795 def diffstatsum(stats):
2796 def diffstatsum(stats):
2796 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2797 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2797 for f, a, r, b in stats:
2798 for f, a, r, b in stats:
2798 maxfile = max(maxfile, encoding.colwidth(f))
2799 maxfile = max(maxfile, encoding.colwidth(f))
2799 maxtotal = max(maxtotal, a + r)
2800 maxtotal = max(maxtotal, a + r)
2800 addtotal += a
2801 addtotal += a
2801 removetotal += r
2802 removetotal += r
2802 binary = binary or b
2803 binary = binary or b
2803
2804
2804 return maxfile, maxtotal, addtotal, removetotal, binary
2805 return maxfile, maxtotal, addtotal, removetotal, binary
2805
2806
2806 def diffstatdata(lines):
2807 def diffstatdata(lines):
2807 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2808 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2808
2809
2809 results = []
2810 results = []
2810 filename, adds, removes, isbinary = None, 0, 0, False
2811 filename, adds, removes, isbinary = None, 0, 0, False
2811
2812
2812 def addresult():
2813 def addresult():
2813 if filename:
2814 if filename:
2814 results.append((filename, adds, removes, isbinary))
2815 results.append((filename, adds, removes, isbinary))
2815
2816
2816 # inheader is used to track if a line is in the
2817 # inheader is used to track if a line is in the
2817 # header portion of the diff. This helps properly account
2818 # header portion of the diff. This helps properly account
2818 # for lines that start with '--' or '++'
2819 # for lines that start with '--' or '++'
2819 inheader = False
2820 inheader = False
2820
2821
2821 for line in lines:
2822 for line in lines:
2822 if line.startswith('diff'):
2823 if line.startswith('diff'):
2823 addresult()
2824 addresult()
2824 # starting a new file diff
2825 # starting a new file diff
2825 # set numbers to 0 and reset inheader
2826 # set numbers to 0 and reset inheader
2826 inheader = True
2827 inheader = True
2827 adds, removes, isbinary = 0, 0, False
2828 adds, removes, isbinary = 0, 0, False
2828 if line.startswith('diff --git a/'):
2829 if line.startswith('diff --git a/'):
2829 filename = gitre.search(line).group(2)
2830 filename = gitre.search(line).group(2)
2830 elif line.startswith('diff -r'):
2831 elif line.startswith('diff -r'):
2831 # format: "diff -r ... -r ... filename"
2832 # format: "diff -r ... -r ... filename"
2832 filename = diffre.search(line).group(1)
2833 filename = diffre.search(line).group(1)
2833 elif line.startswith('@@'):
2834 elif line.startswith('@@'):
2834 inheader = False
2835 inheader = False
2835 elif line.startswith('+') and not inheader:
2836 elif line.startswith('+') and not inheader:
2836 adds += 1
2837 adds += 1
2837 elif line.startswith('-') and not inheader:
2838 elif line.startswith('-') and not inheader:
2838 removes += 1
2839 removes += 1
2839 elif (line.startswith('GIT binary patch') or
2840 elif (line.startswith('GIT binary patch') or
2840 line.startswith('Binary file')):
2841 line.startswith('Binary file')):
2841 isbinary = True
2842 isbinary = True
2842 addresult()
2843 addresult()
2843 return results
2844 return results
2844
2845
2845 def diffstat(lines, width=80):
2846 def diffstat(lines, width=80):
2846 output = []
2847 output = []
2847 stats = diffstatdata(lines)
2848 stats = diffstatdata(lines)
2848 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2849 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2849
2850
2850 countwidth = len(str(maxtotal))
2851 countwidth = len(str(maxtotal))
2851 if hasbinary and countwidth < 3:
2852 if hasbinary and countwidth < 3:
2852 countwidth = 3
2853 countwidth = 3
2853 graphwidth = width - countwidth - maxname - 6
2854 graphwidth = width - countwidth - maxname - 6
2854 if graphwidth < 10:
2855 if graphwidth < 10:
2855 graphwidth = 10
2856 graphwidth = 10
2856
2857
2857 def scale(i):
2858 def scale(i):
2858 if maxtotal <= graphwidth:
2859 if maxtotal <= graphwidth:
2859 return i
2860 return i
2860 # If diffstat runs out of room it doesn't print anything,
2861 # If diffstat runs out of room it doesn't print anything,
2861 # which isn't very useful, so always print at least one + or -
2862 # which isn't very useful, so always print at least one + or -
2862 # if there were at least some changes.
2863 # if there were at least some changes.
2863 return max(i * graphwidth // maxtotal, int(bool(i)))
2864 return max(i * graphwidth // maxtotal, int(bool(i)))
2864
2865
2865 for filename, adds, removes, isbinary in stats:
2866 for filename, adds, removes, isbinary in stats:
2866 if isbinary:
2867 if isbinary:
2867 count = 'Bin'
2868 count = 'Bin'
2868 else:
2869 else:
2869 count = '%d' % (adds + removes)
2870 count = '%d' % (adds + removes)
2870 pluses = '+' * scale(adds)
2871 pluses = '+' * scale(adds)
2871 minuses = '-' * scale(removes)
2872 minuses = '-' * scale(removes)
2872 output.append(' %s%s | %*s %s%s\n' %
2873 output.append(' %s%s | %*s %s%s\n' %
2873 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2874 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2874 countwidth, count, pluses, minuses))
2875 countwidth, count, pluses, minuses))
2875
2876
2876 if stats:
2877 if stats:
2877 output.append(_(' %d files changed, %d insertions(+), '
2878 output.append(_(' %d files changed, %d insertions(+), '
2878 '%d deletions(-)\n')
2879 '%d deletions(-)\n')
2879 % (len(stats), totaladds, totalremoves))
2880 % (len(stats), totaladds, totalremoves))
2880
2881
2881 return ''.join(output)
2882 return ''.join(output)
2882
2883
2883 def diffstatui(*args, **kw):
2884 def diffstatui(*args, **kw):
2884 '''like diffstat(), but yields 2-tuples of (output, label) for
2885 '''like diffstat(), but yields 2-tuples of (output, label) for
2885 ui.write()
2886 ui.write()
2886 '''
2887 '''
2887
2888
2888 for line in diffstat(*args, **kw).splitlines():
2889 for line in diffstat(*args, **kw).splitlines():
2889 if line and line[-1] in '+-':
2890 if line and line[-1] in '+-':
2890 name, graph = line.rsplit(' ', 1)
2891 name, graph = line.rsplit(' ', 1)
2891 yield (name + ' ', '')
2892 yield (name + ' ', '')
2892 m = re.search(br'\++', graph)
2893 m = re.search(br'\++', graph)
2893 if m:
2894 if m:
2894 yield (m.group(0), 'diffstat.inserted')
2895 yield (m.group(0), 'diffstat.inserted')
2895 m = re.search(br'-+', graph)
2896 m = re.search(br'-+', graph)
2896 if m:
2897 if m:
2897 yield (m.group(0), 'diffstat.deleted')
2898 yield (m.group(0), 'diffstat.deleted')
2898 else:
2899 else:
2899 yield (line, '')
2900 yield (line, '')
2900 yield ('\n', '')
2901 yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now